{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.08769177437454527,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.769177437454527e-05,
"grad_norm": 7.59375,
"learning_rate": 3e-06,
"loss": 4.1911,
"step": 1
},
{
"epoch": 0.00017538354874909053,
"grad_norm": 7.6875,
"learning_rate": 6e-06,
"loss": 4.1743,
"step": 2
},
{
"epoch": 0.0002630753231236358,
"grad_norm": 8.1875,
"learning_rate": 9e-06,
"loss": 4.1165,
"step": 3
},
{
"epoch": 0.00035076709749818106,
"grad_norm": 8.0,
"learning_rate": 1.2e-05,
"loss": 4.2652,
"step": 4
},
{
"epoch": 0.00043845887187272633,
"grad_norm": 7.9375,
"learning_rate": 1.5e-05,
"loss": 4.1691,
"step": 5
},
{
"epoch": 0.0005261506462472716,
"grad_norm": 8.0625,
"learning_rate": 1.8e-05,
"loss": 4.2089,
"step": 6
},
{
"epoch": 0.0006138424206218169,
"grad_norm": 8.25,
"learning_rate": 2.1000000000000002e-05,
"loss": 4.1677,
"step": 7
},
{
"epoch": 0.0007015341949963621,
"grad_norm": 7.8125,
"learning_rate": 2.4e-05,
"loss": 4.1564,
"step": 8
},
{
"epoch": 0.0007892259693709074,
"grad_norm": 7.75,
"learning_rate": 2.7e-05,
"loss": 4.1722,
"step": 9
},
{
"epoch": 0.0008769177437454527,
"grad_norm": 7.3125,
"learning_rate": 3e-05,
"loss": 4.1554,
"step": 10
},
{
"epoch": 0.0009646095181199979,
"grad_norm": 7.53125,
"learning_rate": 3.2999999999999996e-05,
"loss": 4.1511,
"step": 11
},
{
"epoch": 0.0010523012924945432,
"grad_norm": 7.1875,
"learning_rate": 3.6e-05,
"loss": 4.152,
"step": 12
},
{
"epoch": 0.0011399930668690886,
"grad_norm": 6.8125,
"learning_rate": 3.9e-05,
"loss": 4.1581,
"step": 13
},
{
"epoch": 0.0012276848412436337,
"grad_norm": 6.625,
"learning_rate": 4.2000000000000004e-05,
"loss": 4.0485,
"step": 14
},
{
"epoch": 0.001315376615618179,
"grad_norm": 6.28125,
"learning_rate": 4.4999999999999996e-05,
"loss": 4.083,
"step": 15
},
{
"epoch": 0.0014030683899927242,
"grad_norm": 5.625,
"learning_rate": 4.8e-05,
"loss": 4.075,
"step": 16
},
{
"epoch": 0.0014907601643672696,
"grad_norm": 5.625,
"learning_rate": 5.1000000000000006e-05,
"loss": 4.0571,
"step": 17
},
{
"epoch": 0.0015784519387418148,
"grad_norm": 5.53125,
"learning_rate": 5.4e-05,
"loss": 4.0213,
"step": 18
},
{
"epoch": 0.0016661437131163602,
"grad_norm": 4.5625,
"learning_rate": 5.7e-05,
"loss": 4.0436,
"step": 19
},
{
"epoch": 0.0017538354874909053,
"grad_norm": 4.6875,
"learning_rate": 6e-05,
"loss": 4.0306,
"step": 20
},
{
"epoch": 0.0018415272618654507,
"grad_norm": 4.125,
"learning_rate": 6.3e-05,
"loss": 4.0364,
"step": 21
},
{
"epoch": 0.0019292190362399958,
"grad_norm": 3.875,
"learning_rate": 6.599999999999999e-05,
"loss": 3.9774,
"step": 22
},
{
"epoch": 0.002016910810614541,
"grad_norm": 3.421875,
"learning_rate": 6.9e-05,
"loss": 3.9513,
"step": 23
},
{
"epoch": 0.0021046025849890864,
"grad_norm": 3.21875,
"learning_rate": 7.2e-05,
"loss": 3.9453,
"step": 24
},
{
"epoch": 0.0021922943593636317,
"grad_norm": 2.734375,
"learning_rate": 7.500000000000001e-05,
"loss": 3.9338,
"step": 25
},
{
"epoch": 0.002279986133738177,
"grad_norm": 2.375,
"learning_rate": 7.8e-05,
"loss": 3.9318,
"step": 26
},
{
"epoch": 0.0023676779081127225,
"grad_norm": 1.921875,
"learning_rate": 8.1e-05,
"loss": 3.8758,
"step": 27
},
{
"epoch": 0.0024553696824872674,
"grad_norm": 1.828125,
"learning_rate": 8.400000000000001e-05,
"loss": 3.8885,
"step": 28
},
{
"epoch": 0.002543061456861813,
"grad_norm": 1.765625,
"learning_rate": 8.7e-05,
"loss": 3.8412,
"step": 29
},
{
"epoch": 0.002630753231236358,
"grad_norm": 1.65625,
"learning_rate": 8.999999999999999e-05,
"loss": 3.8114,
"step": 30
},
{
"epoch": 0.0027184450056109036,
"grad_norm": 1.5234375,
"learning_rate": 9.3e-05,
"loss": 3.778,
"step": 31
},
{
"epoch": 0.0028061367799854485,
"grad_norm": 1.515625,
"learning_rate": 9.6e-05,
"loss": 3.7355,
"step": 32
},
{
"epoch": 0.002893828554359994,
"grad_norm": 1.5,
"learning_rate": 9.900000000000001e-05,
"loss": 3.7612,
"step": 33
},
{
"epoch": 0.0029815203287345392,
"grad_norm": 1.390625,
"learning_rate": 0.00010200000000000001,
"loss": 3.7869,
"step": 34
},
{
"epoch": 0.0030692121031090846,
"grad_norm": 1.328125,
"learning_rate": 0.00010500000000000002,
"loss": 3.6998,
"step": 35
},
{
"epoch": 0.0031569038774836296,
"grad_norm": 1.3203125,
"learning_rate": 0.000108,
"loss": 3.645,
"step": 36
},
{
"epoch": 0.003244595651858175,
"grad_norm": 1.265625,
"learning_rate": 0.000111,
"loss": 3.6778,
"step": 37
},
{
"epoch": 0.0033322874262327203,
"grad_norm": 1.28125,
"learning_rate": 0.000114,
"loss": 3.6011,
"step": 38
},
{
"epoch": 0.0034199792006072657,
"grad_norm": 1.28125,
"learning_rate": 0.000117,
"loss": 3.6147,
"step": 39
},
{
"epoch": 0.0035076709749818106,
"grad_norm": 1.2265625,
"learning_rate": 0.00012,
"loss": 3.5427,
"step": 40
},
{
"epoch": 0.003595362749356356,
"grad_norm": 1.1484375,
"learning_rate": 0.000123,
"loss": 3.5252,
"step": 41
},
{
"epoch": 0.0036830545237309014,
"grad_norm": 1.140625,
"learning_rate": 0.000126,
"loss": 3.5946,
"step": 42
},
{
"epoch": 0.0037707462981054467,
"grad_norm": 1.1328125,
"learning_rate": 0.000129,
"loss": 3.498,
"step": 43
},
{
"epoch": 0.0038584380724799917,
"grad_norm": 1.4140625,
"learning_rate": 0.00013199999999999998,
"loss": 3.4045,
"step": 44
},
{
"epoch": 0.0039461298468545375,
"grad_norm": 1.46875,
"learning_rate": 0.000135,
"loss": 3.4418,
"step": 45
},
{
"epoch": 0.004033821621229082,
"grad_norm": 1.5390625,
"learning_rate": 0.000138,
"loss": 3.3624,
"step": 46
},
{
"epoch": 0.004121513395603627,
"grad_norm": 1.6328125,
"learning_rate": 0.000141,
"loss": 3.3476,
"step": 47
},
{
"epoch": 0.004209205169978173,
"grad_norm": 1.5078125,
"learning_rate": 0.000144,
"loss": 3.2416,
"step": 48
},
{
"epoch": 0.004296896944352718,
"grad_norm": 1.171875,
"learning_rate": 0.000147,
"loss": 3.263,
"step": 49
},
{
"epoch": 0.0043845887187272635,
"grad_norm": 1.1328125,
"learning_rate": 0.00015000000000000001,
"loss": 3.2391,
"step": 50
},
{
"epoch": 0.004472280493101809,
"grad_norm": 0.8671875,
"learning_rate": 0.000153,
"loss": 3.1366,
"step": 51
},
{
"epoch": 0.004559972267476354,
"grad_norm": 0.75390625,
"learning_rate": 0.000156,
"loss": 3.103,
"step": 52
},
{
"epoch": 0.0046476640418509,
"grad_norm": 0.77734375,
"learning_rate": 0.000159,
"loss": 3.1938,
"step": 53
},
{
"epoch": 0.004735355816225445,
"grad_norm": 0.69140625,
"learning_rate": 0.000162,
"loss": 3.1503,
"step": 54
},
{
"epoch": 0.0048230475905999895,
"grad_norm": 0.70703125,
"learning_rate": 0.000165,
"loss": 3.1695,
"step": 55
},
{
"epoch": 0.004910739364974535,
"grad_norm": 0.67578125,
"learning_rate": 0.00016800000000000002,
"loss": 3.0348,
"step": 56
},
{
"epoch": 0.00499843113934908,
"grad_norm": 0.68359375,
"learning_rate": 0.000171,
"loss": 3.0016,
"step": 57
},
{
"epoch": 0.005086122913723626,
"grad_norm": 0.640625,
"learning_rate": 0.000174,
"loss": 2.9536,
"step": 58
},
{
"epoch": 0.005173814688098171,
"grad_norm": 0.59765625,
"learning_rate": 0.000177,
"loss": 2.9378,
"step": 59
},
{
"epoch": 0.005261506462472716,
"grad_norm": 0.56640625,
"learning_rate": 0.00017999999999999998,
"loss": 2.9442,
"step": 60
},
{
"epoch": 0.005349198236847262,
"grad_norm": 0.55078125,
"learning_rate": 0.000183,
"loss": 2.8868,
"step": 61
},
{
"epoch": 0.005436890011221807,
"grad_norm": 0.55078125,
"learning_rate": 0.000186,
"loss": 2.8555,
"step": 62
},
{
"epoch": 0.005524581785596352,
"grad_norm": 0.55078125,
"learning_rate": 0.000189,
"loss": 2.8699,
"step": 63
},
{
"epoch": 0.005612273559970897,
"grad_norm": 0.59375,
"learning_rate": 0.000192,
"loss": 2.9004,
"step": 64
},
{
"epoch": 0.005699965334345442,
"grad_norm": 0.625,
"learning_rate": 0.00019500000000000002,
"loss": 2.8415,
"step": 65
},
{
"epoch": 0.005787657108719988,
"grad_norm": 0.5703125,
"learning_rate": 0.00019800000000000002,
"loss": 2.756,
"step": 66
},
{
"epoch": 0.005875348883094533,
"grad_norm": 0.54296875,
"learning_rate": 0.000201,
"loss": 2.8199,
"step": 67
},
{
"epoch": 0.0059630406574690785,
"grad_norm": 0.52734375,
"learning_rate": 0.00020400000000000003,
"loss": 2.753,
"step": 68
},
{
"epoch": 0.006050732431843624,
"grad_norm": 0.46484375,
"learning_rate": 0.00020700000000000002,
"loss": 2.677,
"step": 69
},
{
"epoch": 0.006138424206218169,
"grad_norm": 0.5546875,
"learning_rate": 0.00021000000000000004,
"loss": 2.7099,
"step": 70
},
{
"epoch": 0.006226115980592715,
"grad_norm": 0.5859375,
"learning_rate": 0.00021299999999999997,
"loss": 2.6848,
"step": 71
},
{
"epoch": 0.006313807754967259,
"grad_norm": 0.439453125,
"learning_rate": 0.000216,
"loss": 2.6327,
"step": 72
},
{
"epoch": 0.0064014995293418045,
"grad_norm": 0.43359375,
"learning_rate": 0.00021899999999999998,
"loss": 2.6438,
"step": 73
},
{
"epoch": 0.00648919130371635,
"grad_norm": 0.44921875,
"learning_rate": 0.000222,
"loss": 2.6331,
"step": 74
},
{
"epoch": 0.006576883078090895,
"grad_norm": 0.4296875,
"learning_rate": 0.000225,
"loss": 2.616,
"step": 75
},
{
"epoch": 0.006664574852465441,
"grad_norm": 0.3828125,
"learning_rate": 0.000228,
"loss": 2.4532,
"step": 76
},
{
"epoch": 0.006752266626839986,
"grad_norm": 0.318359375,
"learning_rate": 0.000231,
"loss": 2.4668,
"step": 77
},
{
"epoch": 0.006839958401214531,
"grad_norm": 0.3203125,
"learning_rate": 0.000234,
"loss": 2.4524,
"step": 78
},
{
"epoch": 0.006927650175589077,
"grad_norm": 0.353515625,
"learning_rate": 0.00023700000000000001,
"loss": 2.4424,
"step": 79
},
{
"epoch": 0.007015341949963621,
"grad_norm": 0.30859375,
"learning_rate": 0.00024,
"loss": 2.403,
"step": 80
},
{
"epoch": 0.007103033724338167,
"grad_norm": 0.294921875,
"learning_rate": 0.00024300000000000002,
"loss": 2.4171,
"step": 81
},
{
"epoch": 0.007190725498712712,
"grad_norm": 0.298828125,
"learning_rate": 0.000246,
"loss": 2.4317,
"step": 82
},
{
"epoch": 0.007278417273087257,
"grad_norm": 0.259765625,
"learning_rate": 0.00024900000000000004,
"loss": 2.3678,
"step": 83
},
{
"epoch": 0.007366109047461803,
"grad_norm": 0.341796875,
"learning_rate": 0.000252,
"loss": 2.3322,
"step": 84
},
{
"epoch": 0.007453800821836348,
"grad_norm": 0.310546875,
"learning_rate": 0.000255,
"loss": 2.2875,
"step": 85
},
{
"epoch": 0.0075414925962108935,
"grad_norm": 0.248046875,
"learning_rate": 0.000258,
"loss": 2.2755,
"step": 86
},
{
"epoch": 0.007629184370585439,
"grad_norm": 0.2578125,
"learning_rate": 0.000261,
"loss": 2.245,
"step": 87
},
{
"epoch": 0.007716876144959983,
"grad_norm": 0.220703125,
"learning_rate": 0.00026399999999999997,
"loss": 2.1879,
"step": 88
},
{
"epoch": 0.007804567919334529,
"grad_norm": 0.279296875,
"learning_rate": 0.000267,
"loss": 2.263,
"step": 89
},
{
"epoch": 0.007892259693709075,
"grad_norm": 0.240234375,
"learning_rate": 0.00027,
"loss": 2.2339,
"step": 90
},
{
"epoch": 0.00797995146808362,
"grad_norm": 0.23828125,
"learning_rate": 0.000273,
"loss": 2.2315,
"step": 91
},
{
"epoch": 0.008067643242458164,
"grad_norm": 0.2119140625,
"learning_rate": 0.000276,
"loss": 2.1785,
"step": 92
},
{
"epoch": 0.00815533501683271,
"grad_norm": 0.216796875,
"learning_rate": 0.000279,
"loss": 2.1301,
"step": 93
},
{
"epoch": 0.008243026791207255,
"grad_norm": 0.203125,
"learning_rate": 0.000282,
"loss": 2.1555,
"step": 94
},
{
"epoch": 0.008330718565581801,
"grad_norm": 0.2734375,
"learning_rate": 0.000285,
"loss": 2.0664,
"step": 95
},
{
"epoch": 0.008418410339956345,
"grad_norm": 0.212890625,
"learning_rate": 0.000288,
"loss": 2.0808,
"step": 96
},
{
"epoch": 0.008506102114330892,
"grad_norm": 0.2060546875,
"learning_rate": 0.000291,
"loss": 1.9987,
"step": 97
},
{
"epoch": 0.008593793888705436,
"grad_norm": 0.2177734375,
"learning_rate": 0.000294,
"loss": 2.0368,
"step": 98
},
{
"epoch": 0.008681485663079982,
"grad_norm": 0.1953125,
"learning_rate": 0.000297,
"loss": 2.0081,
"step": 99
},
{
"epoch": 0.008769177437454527,
"grad_norm": 0.28515625,
"learning_rate": 0.00030000000000000003,
"loss": 2.0451,
"step": 100
},
{
"epoch": 0.008856869211829071,
"grad_norm": 0.1875,
"learning_rate": 0.00030300000000000005,
"loss": 1.9185,
"step": 101
},
{
"epoch": 0.008944560986203618,
"grad_norm": 0.18359375,
"learning_rate": 0.000306,
"loss": 1.9602,
"step": 102
},
{
"epoch": 0.009032252760578162,
"grad_norm": 0.197265625,
"learning_rate": 0.000309,
"loss": 1.9397,
"step": 103
},
{
"epoch": 0.009119944534952708,
"grad_norm": 0.271484375,
"learning_rate": 0.000312,
"loss": 1.9686,
"step": 104
},
{
"epoch": 0.009207636309327253,
"grad_norm": 0.2314453125,
"learning_rate": 0.000315,
"loss": 1.903,
"step": 105
},
{
"epoch": 0.0092953280837018,
"grad_norm": 0.1923828125,
"learning_rate": 0.000318,
"loss": 1.9059,
"step": 106
},
{
"epoch": 0.009383019858076344,
"grad_norm": 0.189453125,
"learning_rate": 0.000321,
"loss": 1.8646,
"step": 107
},
{
"epoch": 0.00947071163245089,
"grad_norm": 0.1826171875,
"learning_rate": 0.000324,
"loss": 1.8592,
"step": 108
},
{
"epoch": 0.009558403406825434,
"grad_norm": 0.2314453125,
"learning_rate": 0.000327,
"loss": 1.8891,
"step": 109
},
{
"epoch": 0.009646095181199979,
"grad_norm": 0.1474609375,
"learning_rate": 0.00033,
"loss": 1.7582,
"step": 110
},
{
"epoch": 0.009733786955574525,
"grad_norm": 0.1708984375,
"learning_rate": 0.000333,
"loss": 1.7959,
"step": 111
},
{
"epoch": 0.00982147872994907,
"grad_norm": 0.2255859375,
"learning_rate": 0.00033600000000000004,
"loss": 1.8503,
"step": 112
},
{
"epoch": 0.009909170504323616,
"grad_norm": 0.1640625,
"learning_rate": 0.000339,
"loss": 1.8528,
"step": 113
},
{
"epoch": 0.00999686227869816,
"grad_norm": 0.1474609375,
"learning_rate": 0.000342,
"loss": 1.7961,
"step": 114
},
{
"epoch": 0.010084554053072707,
"grad_norm": 0.1337890625,
"learning_rate": 0.00034500000000000004,
"loss": 1.7692,
"step": 115
},
{
"epoch": 0.010172245827447251,
"grad_norm": 0.1376953125,
"learning_rate": 0.000348,
"loss": 1.781,
"step": 116
},
{
"epoch": 0.010259937601821796,
"grad_norm": 0.1669921875,
"learning_rate": 0.000351,
"loss": 1.7401,
"step": 117
},
{
"epoch": 0.010347629376196342,
"grad_norm": 0.1328125,
"learning_rate": 0.000354,
"loss": 1.7695,
"step": 118
},
{
"epoch": 0.010435321150570886,
"grad_norm": 0.1337890625,
"learning_rate": 0.000357,
"loss": 1.744,
"step": 119
},
{
"epoch": 0.010523012924945433,
"grad_norm": 0.166015625,
"learning_rate": 0.00035999999999999997,
"loss": 1.7182,
"step": 120
},
{
"epoch": 0.010610704699319977,
"grad_norm": 0.1474609375,
"learning_rate": 0.000363,
"loss": 1.7426,
"step": 121
},
{
"epoch": 0.010698396473694523,
"grad_norm": 0.1416015625,
"learning_rate": 0.000366,
"loss": 1.6804,
"step": 122
},
{
"epoch": 0.010786088248069068,
"grad_norm": 0.12353515625,
"learning_rate": 0.000369,
"loss": 1.753,
"step": 123
},
{
"epoch": 0.010873780022443614,
"grad_norm": 0.14453125,
"learning_rate": 0.000372,
"loss": 1.7363,
"step": 124
},
{
"epoch": 0.010961471796818159,
"grad_norm": 0.138671875,
"learning_rate": 0.000375,
"loss": 1.7182,
"step": 125
},
{
"epoch": 0.011049163571192703,
"grad_norm": 0.158203125,
"learning_rate": 0.000378,
"loss": 1.6622,
"step": 126
},
{
"epoch": 0.01113685534556725,
"grad_norm": 0.1376953125,
"learning_rate": 0.000381,
"loss": 1.6381,
"step": 127
},
{
"epoch": 0.011224547119941794,
"grad_norm": 0.1142578125,
"learning_rate": 0.000384,
"loss": 1.6721,
"step": 128
},
{
"epoch": 0.01131223889431634,
"grad_norm": 0.1669921875,
"learning_rate": 0.00038700000000000003,
"loss": 1.6262,
"step": 129
},
{
"epoch": 0.011399930668690885,
"grad_norm": 0.1376953125,
"learning_rate": 0.00039000000000000005,
"loss": 1.7461,
"step": 130
},
{
"epoch": 0.011487622443065431,
"grad_norm": 0.2353515625,
"learning_rate": 0.000393,
"loss": 1.6759,
"step": 131
},
{
"epoch": 0.011575314217439975,
"grad_norm": 0.208984375,
"learning_rate": 0.00039600000000000003,
"loss": 1.6878,
"step": 132
},
{
"epoch": 0.011663005991814522,
"grad_norm": 0.29296875,
"learning_rate": 0.00039900000000000005,
"loss": 1.6794,
"step": 133
},
{
"epoch": 0.011750697766189066,
"grad_norm": 0.2080078125,
"learning_rate": 0.000402,
"loss": 1.6118,
"step": 134
},
{
"epoch": 0.01183838954056361,
"grad_norm": 0.1982421875,
"learning_rate": 0.00040500000000000003,
"loss": 1.6834,
"step": 135
},
{
"epoch": 0.011926081314938157,
"grad_norm": 0.1474609375,
"learning_rate": 0.00040800000000000005,
"loss": 1.6754,
"step": 136
},
{
"epoch": 0.012013773089312701,
"grad_norm": 0.1572265625,
"learning_rate": 0.000411,
"loss": 1.6149,
"step": 137
},
{
"epoch": 0.012101464863687248,
"grad_norm": 0.10546875,
"learning_rate": 0.00041400000000000003,
"loss": 1.7056,
"step": 138
},
{
"epoch": 0.012189156638061792,
"grad_norm": 0.1650390625,
"learning_rate": 0.00041700000000000005,
"loss": 1.6293,
"step": 139
},
{
"epoch": 0.012276848412436338,
"grad_norm": 0.140625,
"learning_rate": 0.00042000000000000007,
"loss": 1.6483,
"step": 140
},
{
"epoch": 0.012364540186810883,
"grad_norm": 0.1181640625,
"learning_rate": 0.000423,
"loss": 1.6171,
"step": 141
},
{
"epoch": 0.01245223196118543,
"grad_norm": 0.12255859375,
"learning_rate": 0.00042599999999999995,
"loss": 1.6129,
"step": 142
},
{
"epoch": 0.012539923735559974,
"grad_norm": 0.1337890625,
"learning_rate": 0.00042899999999999997,
"loss": 1.6066,
"step": 143
},
{
"epoch": 0.012627615509934518,
"grad_norm": 0.154296875,
"learning_rate": 0.000432,
"loss": 1.5817,
"step": 144
},
{
"epoch": 0.012715307284309064,
"grad_norm": 0.1162109375,
"learning_rate": 0.000435,
"loss": 1.666,
"step": 145
},
{
"epoch": 0.012802999058683609,
"grad_norm": 0.1123046875,
"learning_rate": 0.00043799999999999997,
"loss": 1.6477,
"step": 146
},
{
"epoch": 0.012890690833058155,
"grad_norm": 0.1533203125,
"learning_rate": 0.000441,
"loss": 1.5617,
"step": 147
},
{
"epoch": 0.0129783826074327,
"grad_norm": 0.154296875,
"learning_rate": 0.000444,
"loss": 1.5307,
"step": 148
},
{
"epoch": 0.013066074381807246,
"grad_norm": 0.09716796875,
"learning_rate": 0.00044699999999999997,
"loss": 1.5576,
"step": 149
},
{
"epoch": 0.01315376615618179,
"grad_norm": 0.1533203125,
"learning_rate": 0.00045,
"loss": 1.5612,
"step": 150
},
{
"epoch": 0.013241457930556335,
"grad_norm": 0.1064453125,
"learning_rate": 0.000453,
"loss": 1.5933,
"step": 151
},
{
"epoch": 0.013329149704930881,
"grad_norm": 0.140625,
"learning_rate": 0.000456,
"loss": 1.5505,
"step": 152
},
{
"epoch": 0.013416841479305426,
"grad_norm": 0.11376953125,
"learning_rate": 0.000459,
"loss": 1.4969,
"step": 153
},
{
"epoch": 0.013504533253679972,
"grad_norm": 0.1318359375,
"learning_rate": 0.000462,
"loss": 1.5668,
"step": 154
},
{
"epoch": 0.013592225028054516,
"grad_norm": 0.095703125,
"learning_rate": 0.000465,
"loss": 1.5287,
"step": 155
},
{
"epoch": 0.013679916802429063,
"grad_norm": 0.1044921875,
"learning_rate": 0.000468,
"loss": 1.545,
"step": 156
},
{
"epoch": 0.013767608576803607,
"grad_norm": 0.1298828125,
"learning_rate": 0.000471,
"loss": 1.615,
"step": 157
},
{
"epoch": 0.013855300351178153,
"grad_norm": 0.134765625,
"learning_rate": 0.00047400000000000003,
"loss": 1.5476,
"step": 158
},
{
"epoch": 0.013942992125552698,
"grad_norm": 0.10546875,
"learning_rate": 0.000477,
"loss": 1.5439,
"step": 159
},
{
"epoch": 0.014030683899927242,
"grad_norm": 0.189453125,
"learning_rate": 0.00048,
"loss": 1.5647,
"step": 160
},
{
"epoch": 0.014118375674301789,
"grad_norm": 0.11669921875,
"learning_rate": 0.00048300000000000003,
"loss": 1.6254,
"step": 161
},
{
"epoch": 0.014206067448676333,
"grad_norm": 0.10791015625,
"learning_rate": 0.00048600000000000005,
"loss": 1.4983,
"step": 162
},
{
"epoch": 0.01429375922305088,
"grad_norm": 0.11474609375,
"learning_rate": 0.0004890000000000001,
"loss": 1.5548,
"step": 163
},
{
"epoch": 0.014381450997425424,
"grad_norm": 0.11376953125,
"learning_rate": 0.000492,
"loss": 1.4867,
"step": 164
},
{
"epoch": 0.01446914277179997,
"grad_norm": 0.1162109375,
"learning_rate": 0.000495,
"loss": 1.6054,
"step": 165
},
{
"epoch": 0.014556834546174515,
"grad_norm": 0.09716796875,
"learning_rate": 0.0004980000000000001,
"loss": 1.553,
"step": 166
},
{
"epoch": 0.014644526320549061,
"grad_norm": 0.162109375,
"learning_rate": 0.000501,
"loss": 1.568,
"step": 167
},
{
"epoch": 0.014732218094923605,
"grad_norm": 0.11328125,
"learning_rate": 0.000504,
"loss": 1.5792,
"step": 168
},
{
"epoch": 0.01481990986929815,
"grad_norm": 0.1044921875,
"learning_rate": 0.0005070000000000001,
"loss": 1.5449,
"step": 169
},
{
"epoch": 0.014907601643672696,
"grad_norm": 0.1103515625,
"learning_rate": 0.00051,
"loss": 1.5593,
"step": 170
},
{
"epoch": 0.01499529341804724,
"grad_norm": 0.1064453125,
"learning_rate": 0.000513,
"loss": 1.4883,
"step": 171
},
{
"epoch": 0.015082985192421787,
"grad_norm": 0.1064453125,
"learning_rate": 0.000516,
"loss": 1.529,
"step": 172
},
{
"epoch": 0.015170676966796331,
"grad_norm": 0.10302734375,
"learning_rate": 0.0005189999999999999,
"loss": 1.466,
"step": 173
},
{
"epoch": 0.015258368741170878,
"grad_norm": 0.09033203125,
"learning_rate": 0.000522,
"loss": 1.4907,
"step": 174
},
{
"epoch": 0.015346060515545422,
"grad_norm": 0.16796875,
"learning_rate": 0.000525,
"loss": 1.5739,
"step": 175
},
{
"epoch": 0.015433752289919967,
"grad_norm": 0.115234375,
"learning_rate": 0.0005279999999999999,
"loss": 1.5142,
"step": 176
},
{
"epoch": 0.015521444064294513,
"grad_norm": 0.1904296875,
"learning_rate": 0.000531,
"loss": 1.4573,
"step": 177
},
{
"epoch": 0.015609135838669057,
"grad_norm": 0.1142578125,
"learning_rate": 0.000534,
"loss": 1.5181,
"step": 178
},
{
"epoch": 0.015696827613043602,
"grad_norm": 0.1572265625,
"learning_rate": 0.000537,
"loss": 1.4548,
"step": 179
},
{
"epoch": 0.01578451938741815,
"grad_norm": 0.10498046875,
"learning_rate": 0.00054,
"loss": 1.4154,
"step": 180
},
{
"epoch": 0.015872211161792694,
"grad_norm": 0.11376953125,
"learning_rate": 0.000543,
"loss": 1.4346,
"step": 181
},
{
"epoch": 0.01595990293616724,
"grad_norm": 0.103515625,
"learning_rate": 0.000546,
"loss": 1.6055,
"step": 182
},
{
"epoch": 0.016047594710541783,
"grad_norm": 0.11376953125,
"learning_rate": 0.000549,
"loss": 1.4598,
"step": 183
},
{
"epoch": 0.016135286484916328,
"grad_norm": 0.11572265625,
"learning_rate": 0.000552,
"loss": 1.4608,
"step": 184
},
{
"epoch": 0.016222978259290876,
"grad_norm": 0.099609375,
"learning_rate": 0.000555,
"loss": 1.5092,
"step": 185
},
{
"epoch": 0.01631067003366542,
"grad_norm": 0.099609375,
"learning_rate": 0.000558,
"loss": 1.5338,
"step": 186
},
{
"epoch": 0.016398361808039965,
"grad_norm": 0.1708984375,
"learning_rate": 0.000561,
"loss": 1.4916,
"step": 187
},
{
"epoch": 0.01648605358241451,
"grad_norm": 0.11376953125,
"learning_rate": 0.000564,
"loss": 1.4815,
"step": 188
},
{
"epoch": 0.016573745356789057,
"grad_norm": 0.2470703125,
"learning_rate": 0.000567,
"loss": 1.4502,
"step": 189
},
{
"epoch": 0.016661437131163602,
"grad_norm": 0.10302734375,
"learning_rate": 0.00057,
"loss": 1.5195,
"step": 190
},
{
"epoch": 0.016749128905538146,
"grad_norm": 0.15234375,
"learning_rate": 0.000573,
"loss": 1.545,
"step": 191
},
{
"epoch": 0.01683682067991269,
"grad_norm": 0.146484375,
"learning_rate": 0.000576,
"loss": 1.5517,
"step": 192
},
{
"epoch": 0.016924512454287235,
"grad_norm": 0.10791015625,
"learning_rate": 0.000579,
"loss": 1.5469,
"step": 193
},
{
"epoch": 0.017012204228661783,
"grad_norm": 0.1416015625,
"learning_rate": 0.000582,
"loss": 1.4311,
"step": 194
},
{
"epoch": 0.017099896003036328,
"grad_norm": 0.10888671875,
"learning_rate": 0.000585,
"loss": 1.5292,
"step": 195
},
{
"epoch": 0.017187587777410872,
"grad_norm": 0.1240234375,
"learning_rate": 0.000588,
"loss": 1.4326,
"step": 196
},
{
"epoch": 0.017275279551785417,
"grad_norm": 0.10205078125,
"learning_rate": 0.000591,
"loss": 1.4556,
"step": 197
},
{
"epoch": 0.017362971326159965,
"grad_norm": 0.09521484375,
"learning_rate": 0.000594,
"loss": 1.4719,
"step": 198
},
{
"epoch": 0.01745066310053451,
"grad_norm": 0.11572265625,
"learning_rate": 0.0005970000000000001,
"loss": 1.5113,
"step": 199
},
{
"epoch": 0.017538354874909054,
"grad_norm": 0.10498046875,
"learning_rate": 0.0006000000000000001,
"loss": 1.4525,
"step": 200
},
{
"epoch": 0.0176260466492836,
"grad_norm": 0.0888671875,
"learning_rate": 0.000603,
"loss": 1.4616,
"step": 201
},
{
"epoch": 0.017713738423658143,
"grad_norm": 0.1474609375,
"learning_rate": 0.0006060000000000001,
"loss": 1.4662,
"step": 202
},
{
"epoch": 0.01780143019803269,
"grad_norm": 0.1181640625,
"learning_rate": 0.0006090000000000001,
"loss": 1.4347,
"step": 203
},
{
"epoch": 0.017889121972407235,
"grad_norm": 0.09912109375,
"learning_rate": 0.000612,
"loss": 1.5131,
"step": 204
},
{
"epoch": 0.01797681374678178,
"grad_norm": 0.1220703125,
"learning_rate": 0.000615,
"loss": 1.4848,
"step": 205
},
{
"epoch": 0.018064505521156324,
"grad_norm": 0.10498046875,
"learning_rate": 0.000618,
"loss": 1.4201,
"step": 206
},
{
"epoch": 0.018152197295530872,
"grad_norm": 0.1103515625,
"learning_rate": 0.000621,
"loss": 1.4722,
"step": 207
},
{
"epoch": 0.018239889069905417,
"grad_norm": 0.0966796875,
"learning_rate": 0.000624,
"loss": 1.345,
"step": 208
},
{
"epoch": 0.01832758084427996,
"grad_norm": 0.08740234375,
"learning_rate": 0.000627,
"loss": 1.3573,
"step": 209
},
{
"epoch": 0.018415272618654506,
"grad_norm": 0.1494140625,
"learning_rate": 0.00063,
"loss": 1.4277,
"step": 210
},
{
"epoch": 0.01850296439302905,
"grad_norm": 0.0849609375,
"learning_rate": 0.000633,
"loss": 1.4629,
"step": 211
},
{
"epoch": 0.0185906561674036,
"grad_norm": 0.1142578125,
"learning_rate": 0.000636,
"loss": 1.4655,
"step": 212
},
{
"epoch": 0.018678347941778143,
"grad_norm": 0.1328125,
"learning_rate": 0.000639,
"loss": 1.4401,
"step": 213
},
{
"epoch": 0.018766039716152687,
"grad_norm": 0.1025390625,
"learning_rate": 0.000642,
"loss": 1.4852,
"step": 214
},
{
"epoch": 0.018853731490527232,
"grad_norm": 0.099609375,
"learning_rate": 0.000645,
"loss": 1.3802,
"step": 215
},
{
"epoch": 0.01894142326490178,
"grad_norm": 0.111328125,
"learning_rate": 0.000648,
"loss": 1.4288,
"step": 216
},
{
"epoch": 0.019029115039276324,
"grad_norm": 0.126953125,
"learning_rate": 0.000651,
"loss": 1.467,
"step": 217
},
{
"epoch": 0.01911680681365087,
"grad_norm": 0.11376953125,
"learning_rate": 0.000654,
"loss": 1.4569,
"step": 218
},
{
"epoch": 0.019204498588025413,
"grad_norm": 0.1064453125,
"learning_rate": 0.000657,
"loss": 1.4554,
"step": 219
},
{
"epoch": 0.019292190362399958,
"grad_norm": 0.154296875,
"learning_rate": 0.00066,
"loss": 1.4055,
"step": 220
},
{
"epoch": 0.019379882136774506,
"grad_norm": 0.08837890625,
"learning_rate": 0.0006630000000000001,
"loss": 1.4994,
"step": 221
},
{
"epoch": 0.01946757391114905,
"grad_norm": 0.11767578125,
"learning_rate": 0.000666,
"loss": 1.4716,
"step": 222
},
{
"epoch": 0.019555265685523595,
"grad_norm": 0.10498046875,
"learning_rate": 0.000669,
"loss": 1.3835,
"step": 223
},
{
"epoch": 0.01964295745989814,
"grad_norm": 0.11474609375,
"learning_rate": 0.0006720000000000001,
"loss": 1.3807,
"step": 224
},
{
"epoch": 0.019730649234272687,
"grad_norm": 0.1123046875,
"learning_rate": 0.000675,
"loss": 1.4361,
"step": 225
},
{
"epoch": 0.019818341008647232,
"grad_norm": 0.109375,
"learning_rate": 0.000678,
"loss": 1.4103,
"step": 226
},
{
"epoch": 0.019906032783021776,
"grad_norm": 0.1201171875,
"learning_rate": 0.0006810000000000001,
"loss": 1.4801,
"step": 227
},
{
"epoch": 0.01999372455739632,
"grad_norm": 0.10107421875,
"learning_rate": 0.000684,
"loss": 1.4361,
"step": 228
},
{
"epoch": 0.020081416331770865,
"grad_norm": 0.11328125,
"learning_rate": 0.000687,
"loss": 1.4731,
"step": 229
},
{
"epoch": 0.020169108106145413,
"grad_norm": 0.11962890625,
"learning_rate": 0.0006900000000000001,
"loss": 1.4505,
"step": 230
},
{
"epoch": 0.020256799880519958,
"grad_norm": 0.1484375,
"learning_rate": 0.000693,
"loss": 1.3999,
"step": 231
},
{
"epoch": 0.020344491654894502,
"grad_norm": 0.0859375,
"learning_rate": 0.000696,
"loss": 1.4207,
"step": 232
},
{
"epoch": 0.020432183429269047,
"grad_norm": 0.1025390625,
"learning_rate": 0.0006990000000000001,
"loss": 1.4174,
"step": 233
},
{
"epoch": 0.02051987520364359,
"grad_norm": 0.1279296875,
"learning_rate": 0.000702,
"loss": 1.4266,
"step": 234
},
{
"epoch": 0.02060756697801814,
"grad_norm": 0.10595703125,
"learning_rate": 0.000705,
"loss": 1.3834,
"step": 235
},
{
"epoch": 0.020695258752392684,
"grad_norm": 0.09326171875,
"learning_rate": 0.000708,
"loss": 1.3796,
"step": 236
},
{
"epoch": 0.02078295052676723,
"grad_norm": 0.1455078125,
"learning_rate": 0.0007109999999999999,
"loss": 1.4036,
"step": 237
},
{
"epoch": 0.020870642301141773,
"grad_norm": 0.1220703125,
"learning_rate": 0.000714,
"loss": 1.3561,
"step": 238
},
{
"epoch": 0.02095833407551632,
"grad_norm": 0.0849609375,
"learning_rate": 0.000717,
"loss": 1.4045,
"step": 239
},
{
"epoch": 0.021046025849890865,
"grad_norm": 0.1435546875,
"learning_rate": 0.0007199999999999999,
"loss": 1.41,
"step": 240
},
{
"epoch": 0.02113371762426541,
"grad_norm": 0.1396484375,
"learning_rate": 0.000723,
"loss": 1.4192,
"step": 241
},
{
"epoch": 0.021221409398639954,
"grad_norm": 0.11328125,
"learning_rate": 0.000726,
"loss": 1.3841,
"step": 242
},
{
"epoch": 0.0213091011730145,
"grad_norm": 0.1240234375,
"learning_rate": 0.000729,
"loss": 1.3593,
"step": 243
},
{
"epoch": 0.021396792947389047,
"grad_norm": 0.1181640625,
"learning_rate": 0.000732,
"loss": 1.3946,
"step": 244
},
{
"epoch": 0.02148448472176359,
"grad_norm": 0.10400390625,
"learning_rate": 0.000735,
"loss": 1.4397,
"step": 245
},
{
"epoch": 0.021572176496138136,
"grad_norm": 0.10595703125,
"learning_rate": 0.000738,
"loss": 1.3494,
"step": 246
},
{
"epoch": 0.02165986827051268,
"grad_norm": 0.1396484375,
"learning_rate": 0.000741,
"loss": 1.4703,
"step": 247
},
{
"epoch": 0.02174756004488723,
"grad_norm": 0.11181640625,
"learning_rate": 0.000744,
"loss": 1.3589,
"step": 248
},
{
"epoch": 0.021835251819261773,
"grad_norm": 0.1220703125,
"learning_rate": 0.000747,
"loss": 1.3954,
"step": 249
},
{
"epoch": 0.021922943593636317,
"grad_norm": 0.1044921875,
"learning_rate": 0.00075,
"loss": 1.3917,
"step": 250
},
{
"epoch": 0.022010635368010862,
"grad_norm": 0.11376953125,
"learning_rate": 0.000753,
"loss": 1.3834,
"step": 251
},
{
"epoch": 0.022098327142385406,
"grad_norm": 0.0986328125,
"learning_rate": 0.000756,
"loss": 1.4159,
"step": 252
},
{
"epoch": 0.022186018916759954,
"grad_norm": 0.103515625,
"learning_rate": 0.000759,
"loss": 1.4357,
"step": 253
},
{
"epoch": 0.0222737106911345,
"grad_norm": 0.10888671875,
"learning_rate": 0.000762,
"loss": 1.4194,
"step": 254
},
{
"epoch": 0.022361402465509043,
"grad_norm": 0.09375,
"learning_rate": 0.0007650000000000001,
"loss": 1.3669,
"step": 255
},
{
"epoch": 0.022449094239883588,
"grad_norm": 0.095703125,
"learning_rate": 0.000768,
"loss": 1.4522,
"step": 256
},
{
"epoch": 0.022536786014258136,
"grad_norm": 0.1103515625,
"learning_rate": 0.000771,
"loss": 1.3672,
"step": 257
},
{
"epoch": 0.02262447778863268,
"grad_norm": 0.08984375,
"learning_rate": 0.0007740000000000001,
"loss": 1.3274,
"step": 258
},
{
"epoch": 0.022712169563007225,
"grad_norm": 0.1298828125,
"learning_rate": 0.000777,
"loss": 1.3584,
"step": 259
},
{
"epoch": 0.02279986133738177,
"grad_norm": 0.12158203125,
"learning_rate": 0.0007800000000000001,
"loss": 1.3933,
"step": 260
},
{
"epoch": 0.022887553111756314,
"grad_norm": 0.10888671875,
"learning_rate": 0.0007830000000000001,
"loss": 1.4353,
"step": 261
},
{
"epoch": 0.022975244886130862,
"grad_norm": 0.10205078125,
"learning_rate": 0.000786,
"loss": 1.3784,
"step": 262
},
{
"epoch": 0.023062936660505406,
"grad_norm": 0.1708984375,
"learning_rate": 0.0007890000000000001,
"loss": 1.4389,
"step": 263
},
{
"epoch": 0.02315062843487995,
"grad_norm": 0.10791015625,
"learning_rate": 0.0007920000000000001,
"loss": 1.3353,
"step": 264
},
{
"epoch": 0.023238320209254495,
"grad_norm": 0.208984375,
"learning_rate": 0.000795,
"loss": 1.375,
"step": 265
},
{
"epoch": 0.023326011983629043,
"grad_norm": 0.0849609375,
"learning_rate": 0.0007980000000000001,
"loss": 1.3568,
"step": 266
},
{
"epoch": 0.023413703758003588,
"grad_norm": 0.15625,
"learning_rate": 0.0008010000000000001,
"loss": 1.3975,
"step": 267
},
{
"epoch": 0.023501395532378132,
"grad_norm": 0.09716796875,
"learning_rate": 0.000804,
"loss": 1.4104,
"step": 268
},
{
"epoch": 0.023589087306752677,
"grad_norm": 0.1201171875,
"learning_rate": 0.0008070000000000001,
"loss": 1.4314,
"step": 269
},
{
"epoch": 0.02367677908112722,
"grad_norm": 0.08935546875,
"learning_rate": 0.0008100000000000001,
"loss": 1.3977,
"step": 270
},
{
"epoch": 0.02376447085550177,
"grad_norm": 0.087890625,
"learning_rate": 0.000813,
"loss": 1.4509,
"step": 271
},
{
"epoch": 0.023852162629876314,
"grad_norm": 0.154296875,
"learning_rate": 0.0008160000000000001,
"loss": 1.4231,
"step": 272
},
{
"epoch": 0.02393985440425086,
"grad_norm": 0.099609375,
"learning_rate": 0.0008190000000000001,
"loss": 1.381,
"step": 273
},
{
"epoch": 0.024027546178625403,
"grad_norm": 0.224609375,
"learning_rate": 0.000822,
"loss": 1.423,
"step": 274
},
{
"epoch": 0.02411523795299995,
"grad_norm": 0.16015625,
"learning_rate": 0.0008250000000000001,
"loss": 1.4066,
"step": 275
},
{
"epoch": 0.024202929727374495,
"grad_norm": 0.189453125,
"learning_rate": 0.0008280000000000001,
"loss": 1.374,
"step": 276
},
{
"epoch": 0.02429062150174904,
"grad_norm": 0.341796875,
"learning_rate": 0.0008310000000000001,
"loss": 1.3806,
"step": 277
},
{
"epoch": 0.024378313276123584,
"grad_norm": 0.1298828125,
"learning_rate": 0.0008340000000000001,
"loss": 1.4127,
"step": 278
},
{
"epoch": 0.02446600505049813,
"grad_norm": 0.220703125,
"learning_rate": 0.0008370000000000001,
"loss": 1.3966,
"step": 279
},
{
"epoch": 0.024553696824872677,
"grad_norm": 0.2138671875,
"learning_rate": 0.0008400000000000001,
"loss": 1.401,
"step": 280
},
{
"epoch": 0.02464138859924722,
"grad_norm": 0.1513671875,
"learning_rate": 0.0008430000000000001,
"loss": 1.4582,
"step": 281
},
{
"epoch": 0.024729080373621766,
"grad_norm": 0.138671875,
"learning_rate": 0.000846,
"loss": 1.3452,
"step": 282
},
{
"epoch": 0.02481677214799631,
"grad_norm": 0.09814453125,
"learning_rate": 0.0008489999999999999,
"loss": 1.3579,
"step": 283
},
{
"epoch": 0.02490446392237086,
"grad_norm": 0.162109375,
"learning_rate": 0.0008519999999999999,
"loss": 1.3803,
"step": 284
},
{
"epoch": 0.024992155696745403,
"grad_norm": 0.1279296875,
"learning_rate": 0.000855,
"loss": 1.3182,
"step": 285
},
{
"epoch": 0.025079847471119947,
"grad_norm": 0.1884765625,
"learning_rate": 0.0008579999999999999,
"loss": 1.4524,
"step": 286
},
{
"epoch": 0.025167539245494492,
"grad_norm": 0.126953125,
"learning_rate": 0.000861,
"loss": 1.3664,
"step": 287
},
{
"epoch": 0.025255231019869036,
"grad_norm": 0.1640625,
"learning_rate": 0.000864,
"loss": 1.4199,
"step": 288
},
{
"epoch": 0.025342922794243584,
"grad_norm": 0.1591796875,
"learning_rate": 0.0008669999999999999,
"loss": 1.3351,
"step": 289
},
{
"epoch": 0.02543061456861813,
"grad_norm": 0.111328125,
"learning_rate": 0.00087,
"loss": 1.3552,
"step": 290
},
{
"epoch": 0.025518306342992673,
"grad_norm": 0.193359375,
"learning_rate": 0.000873,
"loss": 1.3583,
"step": 291
},
{
"epoch": 0.025605998117367218,
"grad_norm": 0.10205078125,
"learning_rate": 0.0008759999999999999,
"loss": 1.3731,
"step": 292
},
{
"epoch": 0.025693689891741762,
"grad_norm": 0.181640625,
"learning_rate": 0.000879,
"loss": 1.3517,
"step": 293
},
{
"epoch": 0.02578138166611631,
"grad_norm": 0.09521484375,
"learning_rate": 0.000882,
"loss": 1.4209,
"step": 294
},
{
"epoch": 0.025869073440490855,
"grad_norm": 0.1357421875,
"learning_rate": 0.0008849999999999999,
"loss": 1.3834,
"step": 295
},
{
"epoch": 0.0259567652148654,
"grad_norm": 0.12353515625,
"learning_rate": 0.000888,
"loss": 1.4558,
"step": 296
},
{
"epoch": 0.026044456989239944,
"grad_norm": 0.10791015625,
"learning_rate": 0.000891,
"loss": 1.4401,
"step": 297
},
{
"epoch": 0.026132148763614492,
"grad_norm": 0.1630859375,
"learning_rate": 0.0008939999999999999,
"loss": 1.3981,
"step": 298
},
{
"epoch": 0.026219840537989036,
"grad_norm": 0.08740234375,
"learning_rate": 0.000897,
"loss": 1.3309,
"step": 299
},
{
"epoch": 0.02630753231236358,
"grad_norm": 0.240234375,
"learning_rate": 0.0009,
"loss": 1.34,
"step": 300
},
{
"epoch": 0.026395224086738125,
"grad_norm": 0.083984375,
"learning_rate": 0.0009029999999999999,
"loss": 1.3813,
"step": 301
},
{
"epoch": 0.02648291586111267,
"grad_norm": 0.2041015625,
"learning_rate": 0.000906,
"loss": 1.4285,
"step": 302
},
{
"epoch": 0.026570607635487218,
"grad_norm": 0.11572265625,
"learning_rate": 0.000909,
"loss": 1.3756,
"step": 303
},
{
"epoch": 0.026658299409861762,
"grad_norm": 0.119140625,
"learning_rate": 0.000912,
"loss": 1.394,
"step": 304
},
{
"epoch": 0.026745991184236307,
"grad_norm": 0.1552734375,
"learning_rate": 0.000915,
"loss": 1.4289,
"step": 305
},
{
"epoch": 0.02683368295861085,
"grad_norm": 0.09228515625,
"learning_rate": 0.000918,
"loss": 1.3585,
"step": 306
},
{
"epoch": 0.0269213747329854,
"grad_norm": 0.193359375,
"learning_rate": 0.000921,
"loss": 1.3685,
"step": 307
},
{
"epoch": 0.027009066507359944,
"grad_norm": 0.08984375,
"learning_rate": 0.000924,
"loss": 1.3597,
"step": 308
},
{
"epoch": 0.02709675828173449,
"grad_norm": 0.12451171875,
"learning_rate": 0.000927,
"loss": 1.3464,
"step": 309
},
{
"epoch": 0.027184450056109033,
"grad_norm": 0.11865234375,
"learning_rate": 0.00093,
"loss": 1.4275,
"step": 310
},
{
"epoch": 0.027272141830483577,
"grad_norm": 0.10693359375,
"learning_rate": 0.000933,
"loss": 1.3538,
"step": 311
},
{
"epoch": 0.027359833604858125,
"grad_norm": 0.08740234375,
"learning_rate": 0.000936,
"loss": 1.3189,
"step": 312
},
{
"epoch": 0.02744752537923267,
"grad_norm": 0.0908203125,
"learning_rate": 0.0009390000000000001,
"loss": 1.337,
"step": 313
},
{
"epoch": 0.027535217153607214,
"grad_norm": 0.099609375,
"learning_rate": 0.000942,
"loss": 1.3509,
"step": 314
},
{
"epoch": 0.02762290892798176,
"grad_norm": 0.10302734375,
"learning_rate": 0.000945,
"loss": 1.4155,
"step": 315
},
{
"epoch": 0.027710600702356307,
"grad_norm": 0.12060546875,
"learning_rate": 0.0009480000000000001,
"loss": 1.3987,
"step": 316
},
{
"epoch": 0.02779829247673085,
"grad_norm": 0.09765625,
"learning_rate": 0.000951,
"loss": 1.3961,
"step": 317
},
{
"epoch": 0.027885984251105396,
"grad_norm": 0.10888671875,
"learning_rate": 0.000954,
"loss": 1.3766,
"step": 318
},
{
"epoch": 0.02797367602547994,
"grad_norm": 0.1171875,
"learning_rate": 0.0009570000000000001,
"loss": 1.3738,
"step": 319
},
{
"epoch": 0.028061367799854485,
"grad_norm": 0.18359375,
"learning_rate": 0.00096,
"loss": 1.4111,
"step": 320
},
{
"epoch": 0.028149059574229033,
"grad_norm": 0.111328125,
"learning_rate": 0.000963,
"loss": 1.344,
"step": 321
},
{
"epoch": 0.028236751348603577,
"grad_norm": 0.2333984375,
"learning_rate": 0.0009660000000000001,
"loss": 1.3448,
"step": 322
},
{
"epoch": 0.028324443122978122,
"grad_norm": 0.09033203125,
"learning_rate": 0.000969,
"loss": 1.4107,
"step": 323
},
{
"epoch": 0.028412134897352666,
"grad_norm": 0.1708984375,
"learning_rate": 0.0009720000000000001,
"loss": 1.4008,
"step": 324
},
{
"epoch": 0.028499826671727214,
"grad_norm": 0.09228515625,
"learning_rate": 0.0009750000000000001,
"loss": 1.3442,
"step": 325
},
{
"epoch": 0.02858751844610176,
"grad_norm": 0.0966796875,
"learning_rate": 0.0009780000000000001,
"loss": 1.4229,
"step": 326
},
{
"epoch": 0.028675210220476303,
"grad_norm": 0.1123046875,
"learning_rate": 0.000981,
"loss": 1.3201,
"step": 327
},
{
"epoch": 0.028762901994850848,
"grad_norm": 0.10595703125,
"learning_rate": 0.000984,
"loss": 1.4149,
"step": 328
},
{
"epoch": 0.028850593769225392,
"grad_norm": 0.162109375,
"learning_rate": 0.000987,
"loss": 1.3379,
"step": 329
},
{
"epoch": 0.02893828554359994,
"grad_norm": 0.126953125,
"learning_rate": 0.00099,
"loss": 1.3845,
"step": 330
},
{
"epoch": 0.029025977317974485,
"grad_norm": 0.126953125,
"learning_rate": 0.0009930000000000002,
"loss": 1.3366,
"step": 331
},
{
"epoch": 0.02911366909234903,
"grad_norm": 0.130859375,
"learning_rate": 0.0009960000000000001,
"loss": 1.4185,
"step": 332
},
{
"epoch": 0.029201360866723574,
"grad_norm": 0.126953125,
"learning_rate": 0.000999,
"loss": 1.3812,
"step": 333
},
{
"epoch": 0.029289052641098122,
"grad_norm": 0.10400390625,
"learning_rate": 0.001002,
"loss": 1.3699,
"step": 334
},
{
"epoch": 0.029376744415472666,
"grad_norm": 0.1171875,
"learning_rate": 0.001005,
"loss": 1.3294,
"step": 335
},
{
"epoch": 0.02946443618984721,
"grad_norm": 0.1552734375,
"learning_rate": 0.001008,
"loss": 1.3399,
"step": 336
},
{
"epoch": 0.029552127964221755,
"grad_norm": 0.09765625,
"learning_rate": 0.0010110000000000002,
"loss": 1.3641,
"step": 337
},
{
"epoch": 0.0296398197385963,
"grad_norm": 0.1171875,
"learning_rate": 0.0010140000000000001,
"loss": 1.3034,
"step": 338
},
{
"epoch": 0.029727511512970848,
"grad_norm": 0.09375,
"learning_rate": 0.0010170000000000001,
"loss": 1.4665,
"step": 339
},
{
"epoch": 0.029815203287345392,
"grad_norm": 0.095703125,
"learning_rate": 0.00102,
"loss": 1.362,
"step": 340
},
{
"epoch": 0.029902895061719937,
"grad_norm": 0.1328125,
"learning_rate": 0.001023,
"loss": 1.4033,
"step": 341
},
{
"epoch": 0.02999058683609448,
"grad_norm": 0.091796875,
"learning_rate": 0.001026,
"loss": 1.3324,
"step": 342
},
{
"epoch": 0.030078278610469026,
"grad_norm": 0.130859375,
"learning_rate": 0.0010290000000000002,
"loss": 1.3564,
"step": 343
},
{
"epoch": 0.030165970384843574,
"grad_norm": 0.1044921875,
"learning_rate": 0.001032,
"loss": 1.4015,
"step": 344
},
{
"epoch": 0.03025366215921812,
"grad_norm": 0.1044921875,
"learning_rate": 0.001035,
"loss": 1.4183,
"step": 345
},
{
"epoch": 0.030341353933592663,
"grad_norm": 0.09716796875,
"learning_rate": 0.0010379999999999999,
"loss": 1.402,
"step": 346
},
{
"epoch": 0.030429045707967207,
"grad_norm": 0.1279296875,
"learning_rate": 0.001041,
"loss": 1.4356,
"step": 347
},
{
"epoch": 0.030516737482341755,
"grad_norm": 0.10888671875,
"learning_rate": 0.001044,
"loss": 1.3444,
"step": 348
},
{
"epoch": 0.0306044292567163,
"grad_norm": 0.1005859375,
"learning_rate": 0.001047,
"loss": 1.2408,
"step": 349
},
{
"epoch": 0.030692121031090844,
"grad_norm": 0.1181640625,
"learning_rate": 0.00105,
"loss": 1.3412,
"step": 350
},
{
"epoch": 0.03077981280546539,
"grad_norm": 0.107421875,
"learning_rate": 0.001053,
"loss": 1.3695,
"step": 351
},
{
"epoch": 0.030867504579839933,
"grad_norm": 0.10498046875,
"learning_rate": 0.0010559999999999999,
"loss": 1.2612,
"step": 352
},
{
"epoch": 0.03095519635421448,
"grad_norm": 0.126953125,
"learning_rate": 0.001059,
"loss": 1.3209,
"step": 353
},
{
"epoch": 0.031042888128589026,
"grad_norm": 0.11181640625,
"learning_rate": 0.001062,
"loss": 1.3619,
"step": 354
},
{
"epoch": 0.03113057990296357,
"grad_norm": 0.11669921875,
"learning_rate": 0.001065,
"loss": 1.3898,
"step": 355
},
{
"epoch": 0.031218271677338115,
"grad_norm": 0.1083984375,
"learning_rate": 0.001068,
"loss": 1.3756,
"step": 356
},
{
"epoch": 0.03130596345171266,
"grad_norm": 0.09814453125,
"learning_rate": 0.001071,
"loss": 1.3883,
"step": 357
},
{
"epoch": 0.031393655226087204,
"grad_norm": 0.1015625,
"learning_rate": 0.001074,
"loss": 1.3457,
"step": 358
},
{
"epoch": 0.031481347000461755,
"grad_norm": 0.1318359375,
"learning_rate": 0.001077,
"loss": 1.3499,
"step": 359
},
{
"epoch": 0.0315690387748363,
"grad_norm": 0.125,
"learning_rate": 0.00108,
"loss": 1.385,
"step": 360
},
{
"epoch": 0.031656730549210844,
"grad_norm": 0.0966796875,
"learning_rate": 0.001083,
"loss": 1.3921,
"step": 361
},
{
"epoch": 0.03174442232358539,
"grad_norm": 0.10888671875,
"learning_rate": 0.001086,
"loss": 1.3615,
"step": 362
},
{
"epoch": 0.03183211409795993,
"grad_norm": 0.1376953125,
"learning_rate": 0.001089,
"loss": 1.3125,
"step": 363
},
{
"epoch": 0.03191980587233448,
"grad_norm": 0.2080078125,
"learning_rate": 0.001092,
"loss": 1.3186,
"step": 364
},
{
"epoch": 0.03200749764670902,
"grad_norm": 0.09375,
"learning_rate": 0.001095,
"loss": 1.364,
"step": 365
},
{
"epoch": 0.03209518942108357,
"grad_norm": 0.205078125,
"learning_rate": 0.001098,
"loss": 1.4273,
"step": 366
},
{
"epoch": 0.03218288119545811,
"grad_norm": 0.09375,
"learning_rate": 0.001101,
"loss": 1.3585,
"step": 367
},
{
"epoch": 0.032270572969832656,
"grad_norm": 0.111328125,
"learning_rate": 0.001104,
"loss": 1.3809,
"step": 368
},
{
"epoch": 0.03235826474420721,
"grad_norm": 0.11572265625,
"learning_rate": 0.001107,
"loss": 1.4134,
"step": 369
},
{
"epoch": 0.03244595651858175,
"grad_norm": 0.154296875,
"learning_rate": 0.00111,
"loss": 1.3046,
"step": 370
},
{
"epoch": 0.032533648292956296,
"grad_norm": 0.1455078125,
"learning_rate": 0.001113,
"loss": 1.44,
"step": 371
},
{
"epoch": 0.03262134006733084,
"grad_norm": 0.10400390625,
"learning_rate": 0.001116,
"loss": 1.3767,
"step": 372
},
{
"epoch": 0.032709031841705385,
"grad_norm": 0.10400390625,
"learning_rate": 0.001119,
"loss": 1.3568,
"step": 373
},
{
"epoch": 0.03279672361607993,
"grad_norm": 0.10302734375,
"learning_rate": 0.001122,
"loss": 1.2876,
"step": 374
},
{
"epoch": 0.032884415390454474,
"grad_norm": 0.09326171875,
"learning_rate": 0.0011250000000000001,
"loss": 1.3378,
"step": 375
},
{
"epoch": 0.03297210716482902,
"grad_norm": 0.12255859375,
"learning_rate": 0.001128,
"loss": 1.2907,
"step": 376
},
{
"epoch": 0.033059798939203563,
"grad_norm": 0.091796875,
"learning_rate": 0.001131,
"loss": 1.3901,
"step": 377
},
{
"epoch": 0.033147490713578115,
"grad_norm": 0.12158203125,
"learning_rate": 0.001134,
"loss": 1.3821,
"step": 378
},
{
"epoch": 0.03323518248795266,
"grad_norm": 0.083984375,
"learning_rate": 0.001137,
"loss": 1.3682,
"step": 379
},
{
"epoch": 0.033322874262327204,
"grad_norm": 0.1123046875,
"learning_rate": 0.00114,
"loss": 1.3609,
"step": 380
},
{
"epoch": 0.03341056603670175,
"grad_norm": 0.0966796875,
"learning_rate": 0.0011430000000000001,
"loss": 1.3734,
"step": 381
},
{
"epoch": 0.03349825781107629,
"grad_norm": 0.1591796875,
"learning_rate": 0.001146,
"loss": 1.426,
"step": 382
},
{
"epoch": 0.03358594958545084,
"grad_norm": 0.1376953125,
"learning_rate": 0.001149,
"loss": 1.3942,
"step": 383
},
{
"epoch": 0.03367364135982538,
"grad_norm": 0.0908203125,
"learning_rate": 0.001152,
"loss": 1.3952,
"step": 384
},
{
"epoch": 0.033761333134199926,
"grad_norm": 0.09033203125,
"learning_rate": 0.001155,
"loss": 1.4302,
"step": 385
},
{
"epoch": 0.03384902490857447,
"grad_norm": 0.10009765625,
"learning_rate": 0.001158,
"loss": 1.3961,
"step": 386
},
{
"epoch": 0.03393671668294902,
"grad_norm": 0.10888671875,
"learning_rate": 0.0011610000000000001,
"loss": 1.3277,
"step": 387
},
{
"epoch": 0.03402440845732357,
"grad_norm": 0.09765625,
"learning_rate": 0.001164,
"loss": 1.2633,
"step": 388
},
{
"epoch": 0.03411210023169811,
"grad_norm": 0.1220703125,
"learning_rate": 0.001167,
"loss": 1.3971,
"step": 389
},
{
"epoch": 0.034199792006072656,
"grad_norm": 0.09619140625,
"learning_rate": 0.00117,
"loss": 1.3659,
"step": 390
},
{
"epoch": 0.0342874837804472,
"grad_norm": 0.1640625,
"learning_rate": 0.001173,
"loss": 1.4147,
"step": 391
},
{
"epoch": 0.034375175554821745,
"grad_norm": 0.10595703125,
"learning_rate": 0.001176,
"loss": 1.3504,
"step": 392
},
{
"epoch": 0.03446286732919629,
"grad_norm": 0.15625,
"learning_rate": 0.0011790000000000001,
"loss": 1.3842,
"step": 393
},
{
"epoch": 0.034550559103570834,
"grad_norm": 0.11279296875,
"learning_rate": 0.001182,
"loss": 1.4277,
"step": 394
},
{
"epoch": 0.03463825087794538,
"grad_norm": 0.142578125,
"learning_rate": 0.001185,
"loss": 1.3052,
"step": 395
},
{
"epoch": 0.03472594265231993,
"grad_norm": 0.140625,
"learning_rate": 0.001188,
"loss": 1.41,
"step": 396
},
{
"epoch": 0.034813634426694474,
"grad_norm": 0.154296875,
"learning_rate": 0.001191,
"loss": 1.3605,
"step": 397
},
{
"epoch": 0.03490132620106902,
"grad_norm": 0.1572265625,
"learning_rate": 0.0011940000000000002,
"loss": 1.3762,
"step": 398
},
{
"epoch": 0.03498901797544356,
"grad_norm": 0.1845703125,
"learning_rate": 0.0011970000000000001,
"loss": 1.2598,
"step": 399
},
{
"epoch": 0.03507670974981811,
"grad_norm": 0.2236328125,
"learning_rate": 0.0012000000000000001,
"loss": 1.352,
"step": 400
},
{
"epoch": 0.03516440152419265,
"grad_norm": 0.1259765625,
"learning_rate": 0.001203,
"loss": 1.3672,
"step": 401
},
{
"epoch": 0.0352520932985672,
"grad_norm": 0.1767578125,
"learning_rate": 0.001206,
"loss": 1.4493,
"step": 402
},
{
"epoch": 0.03533978507294174,
"grad_norm": 0.2236328125,
"learning_rate": 0.001209,
"loss": 1.3753,
"step": 403
},
{
"epoch": 0.035427476847316286,
"grad_norm": 0.15234375,
"learning_rate": 0.0012120000000000002,
"loss": 1.3092,
"step": 404
},
{
"epoch": 0.03551516862169084,
"grad_norm": 0.12890625,
"learning_rate": 0.0012150000000000002,
"loss": 1.3083,
"step": 405
},
{
"epoch": 0.03560286039606538,
"grad_norm": 0.1025390625,
"learning_rate": 0.0012180000000000001,
"loss": 1.2666,
"step": 406
},
{
"epoch": 0.035690552170439926,
"grad_norm": 0.18359375,
"learning_rate": 0.0012209999999999999,
"loss": 1.31,
"step": 407
},
{
"epoch": 0.03577824394481447,
"grad_norm": 0.130859375,
"learning_rate": 0.001224,
"loss": 1.3723,
"step": 408
},
{
"epoch": 0.035865935719189015,
"grad_norm": 0.10400390625,
"learning_rate": 0.001227,
"loss": 1.296,
"step": 409
},
{
"epoch": 0.03595362749356356,
"grad_norm": 0.1689453125,
"learning_rate": 0.00123,
"loss": 1.3209,
"step": 410
},
{
"epoch": 0.036041319267938104,
"grad_norm": 0.0869140625,
"learning_rate": 0.001233,
"loss": 1.3439,
"step": 411
},
{
"epoch": 0.03612901104231265,
"grad_norm": 0.1640625,
"learning_rate": 0.001236,
"loss": 1.361,
"step": 412
},
{
"epoch": 0.036216702816687193,
"grad_norm": 0.150390625,
"learning_rate": 0.0012389999999999999,
"loss": 1.3845,
"step": 413
},
{
"epoch": 0.036304394591061745,
"grad_norm": 0.12255859375,
"learning_rate": 0.001242,
"loss": 1.3486,
"step": 414
},
{
"epoch": 0.03639208636543629,
"grad_norm": 0.173828125,
"learning_rate": 0.001245,
"loss": 1.3611,
"step": 415
},
{
"epoch": 0.036479778139810834,
"grad_norm": 0.158203125,
"learning_rate": 0.001248,
"loss": 1.3968,
"step": 416
},
{
"epoch": 0.03656746991418538,
"grad_norm": 0.1904296875,
"learning_rate": 0.001251,
"loss": 1.3516,
"step": 417
},
{
"epoch": 0.03665516168855992,
"grad_norm": 0.216796875,
"learning_rate": 0.001254,
"loss": 1.3711,
"step": 418
},
{
"epoch": 0.03674285346293447,
"grad_norm": 0.1689453125,
"learning_rate": 0.0012569999999999999,
"loss": 1.3854,
"step": 419
},
{
"epoch": 0.03683054523730901,
"grad_norm": 0.1650390625,
"learning_rate": 0.00126,
"loss": 1.3558,
"step": 420
},
{
"epoch": 0.036918237011683556,
"grad_norm": 0.1728515625,
"learning_rate": 0.001263,
"loss": 1.2976,
"step": 421
},
{
"epoch": 0.0370059287860581,
"grad_norm": 0.103515625,
"learning_rate": 0.001266,
"loss": 1.3582,
"step": 422
},
{
"epoch": 0.03709362056043265,
"grad_norm": 0.1689453125,
"learning_rate": 0.001269,
"loss": 1.3893,
"step": 423
},
{
"epoch": 0.0371813123348072,
"grad_norm": 0.10986328125,
"learning_rate": 0.001272,
"loss": 1.2884,
"step": 424
},
{
"epoch": 0.03726900410918174,
"grad_norm": 0.1181640625,
"learning_rate": 0.001275,
"loss": 1.3434,
"step": 425
},
{
"epoch": 0.037356695883556286,
"grad_norm": 0.1044921875,
"learning_rate": 0.001278,
"loss": 1.3381,
"step": 426
},
{
"epoch": 0.03744438765793083,
"grad_norm": 0.140625,
"learning_rate": 0.001281,
"loss": 1.3903,
"step": 427
},
{
"epoch": 0.037532079432305375,
"grad_norm": 0.08935546875,
"learning_rate": 0.001284,
"loss": 1.3506,
"step": 428
},
{
"epoch": 0.03761977120667992,
"grad_norm": 0.09814453125,
"learning_rate": 0.001287,
"loss": 1.2621,
"step": 429
},
{
"epoch": 0.037707462981054464,
"grad_norm": 0.09814453125,
"learning_rate": 0.00129,
"loss": 1.3424,
"step": 430
},
{
"epoch": 0.03779515475542901,
"grad_norm": 0.1162109375,
"learning_rate": 0.001293,
"loss": 1.4801,
"step": 431
},
{
"epoch": 0.03788284652980356,
"grad_norm": 0.09814453125,
"learning_rate": 0.001296,
"loss": 1.3269,
"step": 432
},
{
"epoch": 0.037970538304178104,
"grad_norm": 0.138671875,
"learning_rate": 0.001299,
"loss": 1.3656,
"step": 433
},
{
"epoch": 0.03805823007855265,
"grad_norm": 0.08837890625,
"learning_rate": 0.001302,
"loss": 1.3883,
"step": 434
},
{
"epoch": 0.03814592185292719,
"grad_norm": 0.1044921875,
"learning_rate": 0.001305,
"loss": 1.3636,
"step": 435
},
{
"epoch": 0.03823361362730174,
"grad_norm": 0.09130859375,
"learning_rate": 0.001308,
"loss": 1.3846,
"step": 436
},
{
"epoch": 0.03832130540167628,
"grad_norm": 0.1201171875,
"learning_rate": 0.001311,
"loss": 1.3506,
"step": 437
},
{
"epoch": 0.03840899717605083,
"grad_norm": 0.0859375,
"learning_rate": 0.001314,
"loss": 1.3023,
"step": 438
},
{
"epoch": 0.03849668895042537,
"grad_norm": 0.109375,
"learning_rate": 0.001317,
"loss": 1.3817,
"step": 439
},
{
"epoch": 0.038584380724799916,
"grad_norm": 0.1435546875,
"learning_rate": 0.00132,
"loss": 1.3687,
"step": 440
},
{
"epoch": 0.03867207249917447,
"grad_norm": 0.13671875,
"learning_rate": 0.001323,
"loss": 1.3563,
"step": 441
},
{
"epoch": 0.03875976427354901,
"grad_norm": 0.08837890625,
"learning_rate": 0.0013260000000000001,
"loss": 1.3338,
"step": 442
},
{
"epoch": 0.038847456047923556,
"grad_norm": 0.1103515625,
"learning_rate": 0.001329,
"loss": 1.3922,
"step": 443
},
{
"epoch": 0.0389351478222981,
"grad_norm": 0.12158203125,
"learning_rate": 0.001332,
"loss": 1.2568,
"step": 444
},
{
"epoch": 0.039022839596672645,
"grad_norm": 0.1337890625,
"learning_rate": 0.001335,
"loss": 1.3628,
"step": 445
},
{
"epoch": 0.03911053137104719,
"grad_norm": 0.1064453125,
"learning_rate": 0.001338,
"loss": 1.3751,
"step": 446
},
{
"epoch": 0.039198223145421734,
"grad_norm": 0.126953125,
"learning_rate": 0.001341,
"loss": 1.3495,
"step": 447
},
{
"epoch": 0.03928591491979628,
"grad_norm": 0.1474609375,
"learning_rate": 0.0013440000000000001,
"loss": 1.317,
"step": 448
},
{
"epoch": 0.039373606694170823,
"grad_norm": 0.09765625,
"learning_rate": 0.001347,
"loss": 1.3535,
"step": 449
},
{
"epoch": 0.039461298468545375,
"grad_norm": 0.12890625,
"learning_rate": 0.00135,
"loss": 1.2891,
"step": 450
},
{
"epoch": 0.03954899024291992,
"grad_norm": 0.1376953125,
"learning_rate": 0.001353,
"loss": 1.3959,
"step": 451
},
{
"epoch": 0.039636682017294464,
"grad_norm": 0.154296875,
"learning_rate": 0.001356,
"loss": 1.3192,
"step": 452
},
{
"epoch": 0.03972437379166901,
"grad_norm": 0.1572265625,
"learning_rate": 0.001359,
"loss": 1.4324,
"step": 453
},
{
"epoch": 0.03981206556604355,
"grad_norm": 0.0986328125,
"learning_rate": 0.0013620000000000001,
"loss": 1.3246,
"step": 454
},
{
"epoch": 0.0398997573404181,
"grad_norm": 0.138671875,
"learning_rate": 0.0013650000000000001,
"loss": 1.2848,
"step": 455
},
{
"epoch": 0.03998744911479264,
"grad_norm": 0.142578125,
"learning_rate": 0.001368,
"loss": 1.3973,
"step": 456
},
{
"epoch": 0.040075140889167186,
"grad_norm": 0.1572265625,
"learning_rate": 0.001371,
"loss": 1.3794,
"step": 457
},
{
"epoch": 0.04016283266354173,
"grad_norm": 0.099609375,
"learning_rate": 0.001374,
"loss": 1.2708,
"step": 458
},
{
"epoch": 0.04025052443791628,
"grad_norm": 0.171875,
"learning_rate": 0.0013770000000000002,
"loss": 1.3587,
"step": 459
},
{
"epoch": 0.04033821621229083,
"grad_norm": 0.1611328125,
"learning_rate": 0.0013800000000000002,
"loss": 1.2727,
"step": 460
},
{
"epoch": 0.04042590798666537,
"grad_norm": 0.171875,
"learning_rate": 0.0013830000000000001,
"loss": 1.3551,
"step": 461
},
{
"epoch": 0.040513599761039916,
"grad_norm": 0.1328125,
"learning_rate": 0.001386,
"loss": 1.3897,
"step": 462
},
{
"epoch": 0.04060129153541446,
"grad_norm": 0.14453125,
"learning_rate": 0.001389,
"loss": 1.3319,
"step": 463
},
{
"epoch": 0.040688983309789005,
"grad_norm": 0.16796875,
"learning_rate": 0.001392,
"loss": 1.2967,
"step": 464
},
{
"epoch": 0.04077667508416355,
"grad_norm": 0.138671875,
"learning_rate": 0.0013950000000000002,
"loss": 1.3376,
"step": 465
},
{
"epoch": 0.040864366858538094,
"grad_norm": 0.146484375,
"learning_rate": 0.0013980000000000002,
"loss": 1.3628,
"step": 466
},
{
"epoch": 0.04095205863291264,
"grad_norm": 0.1982421875,
"learning_rate": 0.0014010000000000001,
"loss": 1.2868,
"step": 467
},
{
"epoch": 0.04103975040728718,
"grad_norm": 0.0947265625,
"learning_rate": 0.001404,
"loss": 1.3618,
"step": 468
},
{
"epoch": 0.041127442181661734,
"grad_norm": 0.2197265625,
"learning_rate": 0.001407,
"loss": 1.3419,
"step": 469
},
{
"epoch": 0.04121513395603628,
"grad_norm": 0.09765625,
"learning_rate": 0.00141,
"loss": 1.3019,
"step": 470
},
{
"epoch": 0.04130282573041082,
"grad_norm": 0.2470703125,
"learning_rate": 0.001413,
"loss": 1.4217,
"step": 471
},
{
"epoch": 0.04139051750478537,
"grad_norm": 0.0986328125,
"learning_rate": 0.001416,
"loss": 1.3689,
"step": 472
},
{
"epoch": 0.04147820927915991,
"grad_norm": 0.173828125,
"learning_rate": 0.001419,
"loss": 1.3036,
"step": 473
},
{
"epoch": 0.04156590105353446,
"grad_norm": 0.0986328125,
"learning_rate": 0.0014219999999999999,
"loss": 1.3835,
"step": 474
},
{
"epoch": 0.041653592827909,
"grad_norm": 0.1611328125,
"learning_rate": 0.001425,
"loss": 1.2816,
"step": 475
},
{
"epoch": 0.041741284602283546,
"grad_norm": 0.107421875,
"learning_rate": 0.001428,
"loss": 1.3424,
"step": 476
},
{
"epoch": 0.04182897637665809,
"grad_norm": 0.1044921875,
"learning_rate": 0.001431,
"loss": 1.3255,
"step": 477
},
{
"epoch": 0.04191666815103264,
"grad_norm": 0.11376953125,
"learning_rate": 0.001434,
"loss": 1.358,
"step": 478
},
{
"epoch": 0.042004359925407186,
"grad_norm": 0.103515625,
"learning_rate": 0.001437,
"loss": 1.3379,
"step": 479
},
{
"epoch": 0.04209205169978173,
"grad_norm": 0.1162109375,
"learning_rate": 0.0014399999999999999,
"loss": 1.2963,
"step": 480
},
{
"epoch": 0.042179743474156275,
"grad_norm": 0.08984375,
"learning_rate": 0.001443,
"loss": 1.2964,
"step": 481
},
{
"epoch": 0.04226743524853082,
"grad_norm": 0.08837890625,
"learning_rate": 0.001446,
"loss": 1.2804,
"step": 482
},
{
"epoch": 0.042355127022905364,
"grad_norm": 0.1181640625,
"learning_rate": 0.001449,
"loss": 1.353,
"step": 483
},
{
"epoch": 0.04244281879727991,
"grad_norm": 0.140625,
"learning_rate": 0.001452,
"loss": 1.2805,
"step": 484
},
{
"epoch": 0.042530510571654453,
"grad_norm": 0.130859375,
"learning_rate": 0.001455,
"loss": 1.3353,
"step": 485
},
{
"epoch": 0.042618202346029,
"grad_norm": 0.1845703125,
"learning_rate": 0.001458,
"loss": 1.4368,
"step": 486
},
{
"epoch": 0.04270589412040355,
"grad_norm": 0.1416015625,
"learning_rate": 0.001461,
"loss": 1.355,
"step": 487
},
{
"epoch": 0.042793585894778094,
"grad_norm": 0.1318359375,
"learning_rate": 0.001464,
"loss": 1.3571,
"step": 488
},
{
"epoch": 0.04288127766915264,
"grad_norm": 0.12890625,
"learning_rate": 0.001467,
"loss": 1.3144,
"step": 489
},
{
"epoch": 0.04296896944352718,
"grad_norm": 0.09716796875,
"learning_rate": 0.00147,
"loss": 1.3431,
"step": 490
},
{
"epoch": 0.04305666121790173,
"grad_norm": 0.119140625,
"learning_rate": 0.001473,
"loss": 1.3331,
"step": 491
},
{
"epoch": 0.04314435299227627,
"grad_norm": 0.10205078125,
"learning_rate": 0.001476,
"loss": 1.3873,
"step": 492
},
{
"epoch": 0.043232044766650816,
"grad_norm": 0.1181640625,
"learning_rate": 0.001479,
"loss": 1.3456,
"step": 493
},
{
"epoch": 0.04331973654102536,
"grad_norm": 0.10107421875,
"learning_rate": 0.001482,
"loss": 1.354,
"step": 494
},
{
"epoch": 0.043407428315399905,
"grad_norm": 0.1513671875,
"learning_rate": 0.001485,
"loss": 1.3155,
"step": 495
},
{
"epoch": 0.04349512008977446,
"grad_norm": 0.1318359375,
"learning_rate": 0.001488,
"loss": 1.3318,
"step": 496
},
{
"epoch": 0.043582811864149,
"grad_norm": 0.1318359375,
"learning_rate": 0.001491,
"loss": 1.3467,
"step": 497
},
{
"epoch": 0.043670503638523546,
"grad_norm": 0.1005859375,
"learning_rate": 0.001494,
"loss": 1.33,
"step": 498
},
{
"epoch": 0.04375819541289809,
"grad_norm": 0.09765625,
"learning_rate": 0.001497,
"loss": 1.3274,
"step": 499
},
{
"epoch": 0.043845887187272635,
"grad_norm": 0.138671875,
"learning_rate": 0.0015,
"loss": 1.3365,
"step": 500
},
{
"epoch": 0.043845887187272635,
"eval_loss": 1.3519667387008667,
"eval_runtime": 427.9384,
"eval_samples_per_second": 33.76,
"eval_steps_per_second": 8.44,
"step": 500
},
{
"epoch": 0.04393357896164718,
"grad_norm": 0.1767578125,
"learning_rate": 0.001503,
"loss": 1.3827,
"step": 501
},
{
"epoch": 0.044021270736021724,
"grad_norm": 0.08984375,
"learning_rate": 0.001506,
"loss": 1.3601,
"step": 502
},
{
"epoch": 0.04410896251039627,
"grad_norm": 0.1298828125,
"learning_rate": 0.0015090000000000001,
"loss": 1.3613,
"step": 503
},
{
"epoch": 0.04419665428477081,
"grad_norm": 0.15234375,
"learning_rate": 0.001512,
"loss": 1.3476,
"step": 504
},
{
"epoch": 0.044284346059145364,
"grad_norm": 0.09619140625,
"learning_rate": 0.001515,
"loss": 1.382,
"step": 505
},
{
"epoch": 0.04437203783351991,
"grad_norm": 0.134765625,
"learning_rate": 0.001518,
"loss": 1.3764,
"step": 506
},
{
"epoch": 0.04445972960789445,
"grad_norm": 0.09130859375,
"learning_rate": 0.001521,
"loss": 1.265,
"step": 507
},
{
"epoch": 0.044547421382269,
"grad_norm": 0.0908203125,
"learning_rate": 0.001524,
"loss": 1.3309,
"step": 508
},
{
"epoch": 0.04463511315664354,
"grad_norm": 0.095703125,
"learning_rate": 0.0015270000000000001,
"loss": 1.3333,
"step": 509
},
{
"epoch": 0.04472280493101809,
"grad_norm": 0.09765625,
"learning_rate": 0.0015300000000000001,
"loss": 1.3741,
"step": 510
},
{
"epoch": 0.04481049670539263,
"grad_norm": 0.1279296875,
"learning_rate": 0.001533,
"loss": 1.3363,
"step": 511
},
{
"epoch": 0.044898188479767176,
"grad_norm": 0.12890625,
"learning_rate": 0.001536,
"loss": 1.3045,
"step": 512
},
{
"epoch": 0.04498588025414172,
"grad_norm": 0.15625,
"learning_rate": 0.001539,
"loss": 1.4171,
"step": 513
},
{
"epoch": 0.04507357202851627,
"grad_norm": 0.0849609375,
"learning_rate": 0.001542,
"loss": 1.3343,
"step": 514
},
{
"epoch": 0.045161263802890816,
"grad_norm": 0.1640625,
"learning_rate": 0.0015450000000000001,
"loss": 1.3433,
"step": 515
},
{
"epoch": 0.04524895557726536,
"grad_norm": 0.09912109375,
"learning_rate": 0.0015480000000000001,
"loss": 1.281,
"step": 516
},
{
"epoch": 0.045336647351639905,
"grad_norm": 0.1171875,
"learning_rate": 0.001551,
"loss": 1.3229,
"step": 517
},
{
"epoch": 0.04542433912601445,
"grad_norm": 0.0869140625,
"learning_rate": 0.001554,
"loss": 1.3284,
"step": 518
},
{
"epoch": 0.045512030900388994,
"grad_norm": 0.11767578125,
"learning_rate": 0.001557,
"loss": 1.3185,
"step": 519
},
{
"epoch": 0.04559972267476354,
"grad_norm": 0.12255859375,
"learning_rate": 0.0015600000000000002,
"loss": 1.4157,
"step": 520
},
{
"epoch": 0.045687414449138083,
"grad_norm": 0.1171875,
"learning_rate": 0.0015630000000000002,
"loss": 1.3225,
"step": 521
},
{
"epoch": 0.04577510622351263,
"grad_norm": 0.1591796875,
"learning_rate": 0.0015660000000000001,
"loss": 1.2768,
"step": 522
},
{
"epoch": 0.04586279799788718,
"grad_norm": 0.1083984375,
"learning_rate": 0.001569,
"loss": 1.3378,
"step": 523
},
{
"epoch": 0.045950489772261724,
"grad_norm": 0.2158203125,
"learning_rate": 0.001572,
"loss": 1.3312,
"step": 524
},
{
"epoch": 0.04603818154663627,
"grad_norm": 0.146484375,
"learning_rate": 0.001575,
"loss": 1.3473,
"step": 525
},
{
"epoch": 0.04612587332101081,
"grad_norm": 0.1533203125,
"learning_rate": 0.0015780000000000002,
"loss": 1.3193,
"step": 526
},
{
"epoch": 0.04621356509538536,
"grad_norm": 0.166015625,
"learning_rate": 0.0015810000000000002,
"loss": 1.3617,
"step": 527
},
{
"epoch": 0.0463012568697599,
"grad_norm": 0.146484375,
"learning_rate": 0.0015840000000000001,
"loss": 1.4205,
"step": 528
},
{
"epoch": 0.046388948644134446,
"grad_norm": 0.1943359375,
"learning_rate": 0.001587,
"loss": 1.3412,
"step": 529
},
{
"epoch": 0.04647664041850899,
"grad_norm": 0.138671875,
"learning_rate": 0.00159,
"loss": 1.3731,
"step": 530
},
{
"epoch": 0.046564332192883535,
"grad_norm": 0.158203125,
"learning_rate": 0.001593,
"loss": 1.3481,
"step": 531
},
{
"epoch": 0.04665202396725809,
"grad_norm": 0.11865234375,
"learning_rate": 0.0015960000000000002,
"loss": 1.3438,
"step": 532
},
{
"epoch": 0.04673971574163263,
"grad_norm": 0.11376953125,
"learning_rate": 0.0015990000000000002,
"loss": 1.3893,
"step": 533
},
{
"epoch": 0.046827407516007176,
"grad_norm": 0.12060546875,
"learning_rate": 0.0016020000000000001,
"loss": 1.3365,
"step": 534
},
{
"epoch": 0.04691509929038172,
"grad_norm": 0.177734375,
"learning_rate": 0.001605,
"loss": 1.3429,
"step": 535
},
{
"epoch": 0.047002791064756265,
"grad_norm": 0.109375,
"learning_rate": 0.001608,
"loss": 1.3629,
"step": 536
},
{
"epoch": 0.04709048283913081,
"grad_norm": 0.177734375,
"learning_rate": 0.0016110000000000002,
"loss": 1.2756,
"step": 537
},
{
"epoch": 0.047178174613505354,
"grad_norm": 0.1103515625,
"learning_rate": 0.0016140000000000002,
"loss": 1.3064,
"step": 538
},
{
"epoch": 0.0472658663878799,
"grad_norm": 0.1455078125,
"learning_rate": 0.0016170000000000002,
"loss": 1.3871,
"step": 539
},
{
"epoch": 0.04735355816225444,
"grad_norm": 0.1220703125,
"learning_rate": 0.0016200000000000001,
"loss": 1.2368,
"step": 540
},
{
"epoch": 0.047441249936628994,
"grad_norm": 0.1572265625,
"learning_rate": 0.001623,
"loss": 1.376,
"step": 541
},
{
"epoch": 0.04752894171100354,
"grad_norm": 0.16015625,
"learning_rate": 0.001626,
"loss": 1.3061,
"step": 542
},
{
"epoch": 0.04761663348537808,
"grad_norm": 0.130859375,
"learning_rate": 0.0016290000000000002,
"loss": 1.2628,
"step": 543
},
{
"epoch": 0.04770432525975263,
"grad_norm": 0.1337890625,
"learning_rate": 0.0016320000000000002,
"loss": 1.2792,
"step": 544
},
{
"epoch": 0.04779201703412717,
"grad_norm": 0.10107421875,
"learning_rate": 0.0016350000000000002,
"loss": 1.285,
"step": 545
},
{
"epoch": 0.04787970880850172,
"grad_norm": 0.193359375,
"learning_rate": 0.0016380000000000001,
"loss": 1.3308,
"step": 546
},
{
"epoch": 0.04796740058287626,
"grad_norm": 0.10302734375,
"learning_rate": 0.001641,
"loss": 1.3833,
"step": 547
},
{
"epoch": 0.048055092357250806,
"grad_norm": 0.2080078125,
"learning_rate": 0.001644,
"loss": 1.3428,
"step": 548
},
{
"epoch": 0.04814278413162535,
"grad_norm": 0.083984375,
"learning_rate": 0.0016470000000000002,
"loss": 1.3376,
"step": 549
},
{
"epoch": 0.0482304759059999,
"grad_norm": 0.099609375,
"learning_rate": 0.0016500000000000002,
"loss": 1.4305,
"step": 550
},
{
"epoch": 0.048318167680374446,
"grad_norm": 0.140625,
"learning_rate": 0.0016530000000000002,
"loss": 1.3828,
"step": 551
},
{
"epoch": 0.04840585945474899,
"grad_norm": 0.09716796875,
"learning_rate": 0.0016560000000000001,
"loss": 1.323,
"step": 552
},
{
"epoch": 0.048493551229123535,
"grad_norm": 0.12890625,
"learning_rate": 0.001659,
"loss": 1.3733,
"step": 553
},
{
"epoch": 0.04858124300349808,
"grad_norm": 0.08837890625,
"learning_rate": 0.0016620000000000003,
"loss": 1.3101,
"step": 554
},
{
"epoch": 0.048668934777872624,
"grad_norm": 0.1064453125,
"learning_rate": 0.0016650000000000002,
"loss": 1.3806,
"step": 555
},
{
"epoch": 0.04875662655224717,
"grad_norm": 0.119140625,
"learning_rate": 0.0016680000000000002,
"loss": 1.321,
"step": 556
},
{
"epoch": 0.04884431832662171,
"grad_norm": 0.1416015625,
"learning_rate": 0.0016710000000000002,
"loss": 1.3756,
"step": 557
},
{
"epoch": 0.04893201010099626,
"grad_norm": 0.1572265625,
"learning_rate": 0.0016740000000000001,
"loss": 1.3487,
"step": 558
},
{
"epoch": 0.04901970187537081,
"grad_norm": 0.11669921875,
"learning_rate": 0.001677,
"loss": 1.2843,
"step": 559
},
{
"epoch": 0.049107393649745354,
"grad_norm": 0.1220703125,
"learning_rate": 0.0016800000000000003,
"loss": 1.3796,
"step": 560
},
{
"epoch": 0.0491950854241199,
"grad_norm": 0.08056640625,
"learning_rate": 0.0016830000000000003,
"loss": 1.3371,
"step": 561
},
{
"epoch": 0.04928277719849444,
"grad_norm": 0.10107421875,
"learning_rate": 0.0016860000000000002,
"loss": 1.3571,
"step": 562
},
{
"epoch": 0.04937046897286899,
"grad_norm": 0.140625,
"learning_rate": 0.001689,
"loss": 1.3039,
"step": 563
},
{
"epoch": 0.04945816074724353,
"grad_norm": 0.0888671875,
"learning_rate": 0.001692,
"loss": 1.3606,
"step": 564
},
{
"epoch": 0.049545852521618076,
"grad_norm": 0.1259765625,
"learning_rate": 0.001695,
"loss": 1.348,
"step": 565
},
{
"epoch": 0.04963354429599262,
"grad_norm": 0.10400390625,
"learning_rate": 0.0016979999999999999,
"loss": 1.291,
"step": 566
},
{
"epoch": 0.049721236070367165,
"grad_norm": 0.142578125,
"learning_rate": 0.0017009999999999998,
"loss": 1.3581,
"step": 567
},
{
"epoch": 0.04980892784474172,
"grad_norm": 0.0947265625,
"learning_rate": 0.0017039999999999998,
"loss": 1.311,
"step": 568
},
{
"epoch": 0.04989661961911626,
"grad_norm": 0.1416015625,
"learning_rate": 0.001707,
"loss": 1.3596,
"step": 569
},
{
"epoch": 0.049984311393490806,
"grad_norm": 0.11376953125,
"learning_rate": 0.00171,
"loss": 1.3427,
"step": 570
},
{
"epoch": 0.05007200316786535,
"grad_norm": 0.1396484375,
"learning_rate": 0.001713,
"loss": 1.3852,
"step": 571
},
{
"epoch": 0.050159694942239895,
"grad_norm": 0.1767578125,
"learning_rate": 0.0017159999999999999,
"loss": 1.3147,
"step": 572
},
{
"epoch": 0.05024738671661444,
"grad_norm": 0.11181640625,
"learning_rate": 0.0017189999999999998,
"loss": 1.3285,
"step": 573
},
{
"epoch": 0.050335078490988984,
"grad_norm": 0.1865234375,
"learning_rate": 0.001722,
"loss": 1.3213,
"step": 574
},
{
"epoch": 0.05042277026536353,
"grad_norm": 0.09619140625,
"learning_rate": 0.001725,
"loss": 1.2918,
"step": 575
},
{
"epoch": 0.05051046203973807,
"grad_norm": 0.09912109375,
"learning_rate": 0.001728,
"loss": 1.3342,
"step": 576
},
{
"epoch": 0.05059815381411262,
"grad_norm": 0.11865234375,
"learning_rate": 0.001731,
"loss": 1.3351,
"step": 577
},
{
"epoch": 0.05068584558848717,
"grad_norm": 0.140625,
"learning_rate": 0.0017339999999999999,
"loss": 1.4707,
"step": 578
},
{
"epoch": 0.05077353736286171,
"grad_norm": 0.125,
"learning_rate": 0.0017369999999999998,
"loss": 1.3766,
"step": 579
},
{
"epoch": 0.05086122913723626,
"grad_norm": 0.1630859375,
"learning_rate": 0.00174,
"loss": 1.3022,
"step": 580
},
{
"epoch": 0.0509489209116108,
"grad_norm": 0.109375,
"learning_rate": 0.001743,
"loss": 1.3365,
"step": 581
},
{
"epoch": 0.05103661268598535,
"grad_norm": 0.1552734375,
"learning_rate": 0.001746,
"loss": 1.2805,
"step": 582
},
{
"epoch": 0.05112430446035989,
"grad_norm": 0.1396484375,
"learning_rate": 0.001749,
"loss": 1.3641,
"step": 583
},
{
"epoch": 0.051211996234734436,
"grad_norm": 0.130859375,
"learning_rate": 0.0017519999999999999,
"loss": 1.3591,
"step": 584
},
{
"epoch": 0.05129968800910898,
"grad_norm": 0.1123046875,
"learning_rate": 0.0017549999999999998,
"loss": 1.2795,
"step": 585
},
{
"epoch": 0.051387379783483525,
"grad_norm": 0.09814453125,
"learning_rate": 0.001758,
"loss": 1.4044,
"step": 586
},
{
"epoch": 0.051475071557858076,
"grad_norm": 0.142578125,
"learning_rate": 0.001761,
"loss": 1.2848,
"step": 587
},
{
"epoch": 0.05156276333223262,
"grad_norm": 0.203125,
"learning_rate": 0.001764,
"loss": 1.3604,
"step": 588
},
{
"epoch": 0.051650455106607165,
"grad_norm": 0.1396484375,
"learning_rate": 0.001767,
"loss": 1.265,
"step": 589
},
{
"epoch": 0.05173814688098171,
"grad_norm": 0.1708984375,
"learning_rate": 0.0017699999999999999,
"loss": 1.3369,
"step": 590
},
{
"epoch": 0.051825838655356254,
"grad_norm": 0.1123046875,
"learning_rate": 0.001773,
"loss": 1.3202,
"step": 591
},
{
"epoch": 0.0519135304297308,
"grad_norm": 0.1044921875,
"learning_rate": 0.001776,
"loss": 1.3488,
"step": 592
},
{
"epoch": 0.05200122220410534,
"grad_norm": 0.1435546875,
"learning_rate": 0.001779,
"loss": 1.3653,
"step": 593
},
{
"epoch": 0.05208891397847989,
"grad_norm": 0.111328125,
"learning_rate": 0.001782,
"loss": 1.3817,
"step": 594
},
{
"epoch": 0.05217660575285443,
"grad_norm": 0.1259765625,
"learning_rate": 0.001785,
"loss": 1.3422,
"step": 595
},
{
"epoch": 0.052264297527228984,
"grad_norm": 0.10302734375,
"learning_rate": 0.0017879999999999999,
"loss": 1.3877,
"step": 596
},
{
"epoch": 0.05235198930160353,
"grad_norm": 0.1318359375,
"learning_rate": 0.001791,
"loss": 1.3137,
"step": 597
},
{
"epoch": 0.05243968107597807,
"grad_norm": 0.10986328125,
"learning_rate": 0.001794,
"loss": 1.3798,
"step": 598
},
{
"epoch": 0.05252737285035262,
"grad_norm": 0.11474609375,
"learning_rate": 0.001797,
"loss": 1.3497,
"step": 599
},
{
"epoch": 0.05261506462472716,
"grad_norm": 0.1142578125,
"learning_rate": 0.0018,
"loss": 1.3417,
"step": 600
},
{
"epoch": 0.052702756399101706,
"grad_norm": 0.125,
"learning_rate": 0.001803,
"loss": 1.3298,
"step": 601
},
{
"epoch": 0.05279044817347625,
"grad_norm": 0.10595703125,
"learning_rate": 0.0018059999999999999,
"loss": 1.3585,
"step": 602
},
{
"epoch": 0.052878139947850795,
"grad_norm": 0.11279296875,
"learning_rate": 0.001809,
"loss": 1.331,
"step": 603
},
{
"epoch": 0.05296583172222534,
"grad_norm": 0.11376953125,
"learning_rate": 0.001812,
"loss": 1.2992,
"step": 604
},
{
"epoch": 0.05305352349659989,
"grad_norm": 0.09619140625,
"learning_rate": 0.001815,
"loss": 1.2915,
"step": 605
},
{
"epoch": 0.053141215270974436,
"grad_norm": 0.09033203125,
"learning_rate": 0.001818,
"loss": 1.32,
"step": 606
},
{
"epoch": 0.05322890704534898,
"grad_norm": 0.0771484375,
"learning_rate": 0.001821,
"loss": 1.3601,
"step": 607
},
{
"epoch": 0.053316598819723525,
"grad_norm": 0.0927734375,
"learning_rate": 0.001824,
"loss": 1.3336,
"step": 608
},
{
"epoch": 0.05340429059409807,
"grad_norm": 0.09765625,
"learning_rate": 0.001827,
"loss": 1.347,
"step": 609
},
{
"epoch": 0.053491982368472614,
"grad_norm": 0.1650390625,
"learning_rate": 0.00183,
"loss": 1.3347,
"step": 610
},
{
"epoch": 0.05357967414284716,
"grad_norm": 0.10546875,
"learning_rate": 0.001833,
"loss": 1.3217,
"step": 611
},
{
"epoch": 0.0536673659172217,
"grad_norm": 0.09716796875,
"learning_rate": 0.001836,
"loss": 1.2653,
"step": 612
},
{
"epoch": 0.05375505769159625,
"grad_norm": 0.11083984375,
"learning_rate": 0.001839,
"loss": 1.3131,
"step": 613
},
{
"epoch": 0.0538427494659708,
"grad_norm": 0.166015625,
"learning_rate": 0.001842,
"loss": 1.3646,
"step": 614
},
{
"epoch": 0.05393044124034534,
"grad_norm": 0.091796875,
"learning_rate": 0.001845,
"loss": 1.3012,
"step": 615
},
{
"epoch": 0.05401813301471989,
"grad_norm": 0.11181640625,
"learning_rate": 0.001848,
"loss": 1.2882,
"step": 616
},
{
"epoch": 0.05410582478909443,
"grad_norm": 0.0927734375,
"learning_rate": 0.001851,
"loss": 1.2802,
"step": 617
},
{
"epoch": 0.05419351656346898,
"grad_norm": 0.107421875,
"learning_rate": 0.001854,
"loss": 1.3446,
"step": 618
},
{
"epoch": 0.05428120833784352,
"grad_norm": 0.138671875,
"learning_rate": 0.001857,
"loss": 1.3352,
"step": 619
},
{
"epoch": 0.054368900112218066,
"grad_norm": 0.14453125,
"learning_rate": 0.00186,
"loss": 1.3795,
"step": 620
},
{
"epoch": 0.05445659188659261,
"grad_norm": 0.142578125,
"learning_rate": 0.001863,
"loss": 1.3184,
"step": 621
},
{
"epoch": 0.054544283660967155,
"grad_norm": 0.12890625,
"learning_rate": 0.001866,
"loss": 1.306,
"step": 622
},
{
"epoch": 0.054631975435341706,
"grad_norm": 0.1357421875,
"learning_rate": 0.001869,
"loss": 1.3334,
"step": 623
},
{
"epoch": 0.05471966720971625,
"grad_norm": 0.09765625,
"learning_rate": 0.001872,
"loss": 1.3268,
"step": 624
},
{
"epoch": 0.054807358984090795,
"grad_norm": 0.11181640625,
"learning_rate": 0.001875,
"loss": 1.3805,
"step": 625
},
{
"epoch": 0.05489505075846534,
"grad_norm": 0.083984375,
"learning_rate": 0.0018780000000000001,
"loss": 1.4231,
"step": 626
},
{
"epoch": 0.054982742532839884,
"grad_norm": 0.1455078125,
"learning_rate": 0.001881,
"loss": 1.3484,
"step": 627
},
{
"epoch": 0.05507043430721443,
"grad_norm": 0.1064453125,
"learning_rate": 0.001884,
"loss": 1.3057,
"step": 628
},
{
"epoch": 0.05515812608158897,
"grad_norm": 0.1181640625,
"learning_rate": 0.001887,
"loss": 1.3342,
"step": 629
},
{
"epoch": 0.05524581785596352,
"grad_norm": 0.1396484375,
"learning_rate": 0.00189,
"loss": 1.3539,
"step": 630
},
{
"epoch": 0.05533350963033806,
"grad_norm": 0.1279296875,
"learning_rate": 0.0018930000000000002,
"loss": 1.3593,
"step": 631
},
{
"epoch": 0.055421201404712614,
"grad_norm": 0.1279296875,
"learning_rate": 0.0018960000000000001,
"loss": 1.3262,
"step": 632
},
{
"epoch": 0.05550889317908716,
"grad_norm": 0.126953125,
"learning_rate": 0.001899,
"loss": 1.3629,
"step": 633
},
{
"epoch": 0.0555965849534617,
"grad_norm": 0.107421875,
"learning_rate": 0.001902,
"loss": 1.313,
"step": 634
},
{
"epoch": 0.05568427672783625,
"grad_norm": 0.1376953125,
"learning_rate": 0.001905,
"loss": 1.3779,
"step": 635
},
{
"epoch": 0.05577196850221079,
"grad_norm": 0.07958984375,
"learning_rate": 0.001908,
"loss": 1.3722,
"step": 636
},
{
"epoch": 0.055859660276585336,
"grad_norm": 0.1494140625,
"learning_rate": 0.0019110000000000002,
"loss": 1.2903,
"step": 637
},
{
"epoch": 0.05594735205095988,
"grad_norm": 0.09619140625,
"learning_rate": 0.0019140000000000001,
"loss": 1.2875,
"step": 638
},
{
"epoch": 0.056035043825334425,
"grad_norm": 0.1337890625,
"learning_rate": 0.001917,
"loss": 1.3276,
"step": 639
},
{
"epoch": 0.05612273559970897,
"grad_norm": 0.1142578125,
"learning_rate": 0.00192,
"loss": 1.2509,
"step": 640
},
{
"epoch": 0.05621042737408352,
"grad_norm": 0.083984375,
"learning_rate": 0.001923,
"loss": 1.3149,
"step": 641
},
{
"epoch": 0.056298119148458066,
"grad_norm": 0.1279296875,
"learning_rate": 0.001926,
"loss": 1.3123,
"step": 642
},
{
"epoch": 0.05638581092283261,
"grad_norm": 0.09765625,
"learning_rate": 0.0019290000000000002,
"loss": 1.3152,
"step": 643
},
{
"epoch": 0.056473502697207155,
"grad_norm": 0.1669921875,
"learning_rate": 0.0019320000000000001,
"loss": 1.337,
"step": 644
},
{
"epoch": 0.0565611944715817,
"grad_norm": 0.11767578125,
"learning_rate": 0.001935,
"loss": 1.3222,
"step": 645
},
{
"epoch": 0.056648886245956244,
"grad_norm": 0.1484375,
"learning_rate": 0.001938,
"loss": 1.2985,
"step": 646
},
{
"epoch": 0.05673657802033079,
"grad_norm": 0.107421875,
"learning_rate": 0.001941,
"loss": 1.4031,
"step": 647
},
{
"epoch": 0.05682426979470533,
"grad_norm": 0.1123046875,
"learning_rate": 0.0019440000000000002,
"loss": 1.2429,
"step": 648
},
{
"epoch": 0.05691196156907988,
"grad_norm": 0.08154296875,
"learning_rate": 0.0019470000000000002,
"loss": 1.3375,
"step": 649
},
{
"epoch": 0.05699965334345443,
"grad_norm": 0.12451171875,
"learning_rate": 0.0019500000000000001,
"loss": 1.3173,
"step": 650
},
{
"epoch": 0.05708734511782897,
"grad_norm": 0.1171875,
"learning_rate": 0.001953,
"loss": 1.3144,
"step": 651
},
{
"epoch": 0.05717503689220352,
"grad_norm": 0.12353515625,
"learning_rate": 0.0019560000000000003,
"loss": 1.4079,
"step": 652
},
{
"epoch": 0.05726272866657806,
"grad_norm": 0.134765625,
"learning_rate": 0.0019590000000000002,
"loss": 1.4005,
"step": 653
},
{
"epoch": 0.05735042044095261,
"grad_norm": 0.142578125,
"learning_rate": 0.001962,
"loss": 1.4134,
"step": 654
},
{
"epoch": 0.05743811221532715,
"grad_norm": 0.169921875,
"learning_rate": 0.001965,
"loss": 1.3191,
"step": 655
},
{
"epoch": 0.057525803989701696,
"grad_norm": 0.1220703125,
"learning_rate": 0.001968,
"loss": 1.3546,
"step": 656
},
{
"epoch": 0.05761349576407624,
"grad_norm": 0.1513671875,
"learning_rate": 0.001971,
"loss": 1.3522,
"step": 657
},
{
"epoch": 0.057701187538450785,
"grad_norm": 0.1484375,
"learning_rate": 0.001974,
"loss": 1.318,
"step": 658
},
{
"epoch": 0.057788879312825336,
"grad_norm": 0.1103515625,
"learning_rate": 0.001977,
"loss": 1.3126,
"step": 659
},
{
"epoch": 0.05787657108719988,
"grad_norm": 0.0947265625,
"learning_rate": 0.00198,
"loss": 1.3112,
"step": 660
},
{
"epoch": 0.057964262861574425,
"grad_norm": 0.1044921875,
"learning_rate": 0.001983,
"loss": 1.3347,
"step": 661
},
{
"epoch": 0.05805195463594897,
"grad_norm": 0.125,
"learning_rate": 0.0019860000000000004,
"loss": 1.384,
"step": 662
},
{
"epoch": 0.058139646410323514,
"grad_norm": 0.11279296875,
"learning_rate": 0.0019890000000000003,
"loss": 1.3456,
"step": 663
},
{
"epoch": 0.05822733818469806,
"grad_norm": 0.1220703125,
"learning_rate": 0.0019920000000000003,
"loss": 1.3589,
"step": 664
},
{
"epoch": 0.0583150299590726,
"grad_norm": 0.1025390625,
"learning_rate": 0.0019950000000000002,
"loss": 1.3744,
"step": 665
},
{
"epoch": 0.05840272173344715,
"grad_norm": 0.1474609375,
"learning_rate": 0.001998,
"loss": 1.3614,
"step": 666
},
{
"epoch": 0.05849041350782169,
"grad_norm": 0.10400390625,
"learning_rate": 0.002001,
"loss": 1.3101,
"step": 667
},
{
"epoch": 0.058578105282196244,
"grad_norm": 0.11767578125,
"learning_rate": 0.002004,
"loss": 1.3496,
"step": 668
},
{
"epoch": 0.05866579705657079,
"grad_norm": 0.09716796875,
"learning_rate": 0.002007,
"loss": 1.3225,
"step": 669
},
{
"epoch": 0.05875348883094533,
"grad_norm": 0.1318359375,
"learning_rate": 0.00201,
"loss": 1.3426,
"step": 670
},
{
"epoch": 0.05884118060531988,
"grad_norm": 0.09521484375,
"learning_rate": 0.002013,
"loss": 1.2791,
"step": 671
},
{
"epoch": 0.05892887237969442,
"grad_norm": 0.11279296875,
"learning_rate": 0.002016,
"loss": 1.2956,
"step": 672
},
{
"epoch": 0.059016564154068966,
"grad_norm": 0.10400390625,
"learning_rate": 0.002019,
"loss": 1.2956,
"step": 673
},
{
"epoch": 0.05910425592844351,
"grad_norm": 0.09228515625,
"learning_rate": 0.0020220000000000004,
"loss": 1.3212,
"step": 674
},
{
"epoch": 0.059191947702818055,
"grad_norm": 0.10498046875,
"learning_rate": 0.0020250000000000003,
"loss": 1.3192,
"step": 675
},
{
"epoch": 0.0592796394771926,
"grad_norm": 0.07958984375,
"learning_rate": 0.0020280000000000003,
"loss": 1.373,
"step": 676
},
{
"epoch": 0.059367331251567144,
"grad_norm": 0.09423828125,
"learning_rate": 0.0020310000000000003,
"loss": 1.4266,
"step": 677
},
{
"epoch": 0.059455023025941696,
"grad_norm": 0.08447265625,
"learning_rate": 0.0020340000000000002,
"loss": 1.3394,
"step": 678
},
{
"epoch": 0.05954271480031624,
"grad_norm": 0.10400390625,
"learning_rate": 0.002037,
"loss": 1.3182,
"step": 679
},
{
"epoch": 0.059630406574690785,
"grad_norm": 0.11328125,
"learning_rate": 0.00204,
"loss": 1.3112,
"step": 680
},
{
"epoch": 0.05971809834906533,
"grad_norm": 0.08740234375,
"learning_rate": 0.002043,
"loss": 1.2881,
"step": 681
},
{
"epoch": 0.059805790123439874,
"grad_norm": 0.1279296875,
"learning_rate": 0.002046,
"loss": 1.3234,
"step": 682
},
{
"epoch": 0.05989348189781442,
"grad_norm": 0.1220703125,
"learning_rate": 0.002049,
"loss": 1.3557,
"step": 683
},
{
"epoch": 0.05998117367218896,
"grad_norm": 0.138671875,
"learning_rate": 0.002052,
"loss": 1.3126,
"step": 684
},
{
"epoch": 0.06006886544656351,
"grad_norm": 0.1259765625,
"learning_rate": 0.0020550000000000004,
"loss": 1.3457,
"step": 685
},
{
"epoch": 0.06015655722093805,
"grad_norm": 0.0986328125,
"learning_rate": 0.0020580000000000004,
"loss": 1.295,
"step": 686
},
{
"epoch": 0.0602442489953126,
"grad_norm": 0.115234375,
"learning_rate": 0.0020610000000000003,
"loss": 1.3832,
"step": 687
},
{
"epoch": 0.06033194076968715,
"grad_norm": 0.130859375,
"learning_rate": 0.002064,
"loss": 1.338,
"step": 688
},
{
"epoch": 0.06041963254406169,
"grad_norm": 0.08984375,
"learning_rate": 0.002067,
"loss": 1.3339,
"step": 689
},
{
"epoch": 0.06050732431843624,
"grad_norm": 0.138671875,
"learning_rate": 0.00207,
"loss": 1.3436,
"step": 690
},
{
"epoch": 0.06059501609281078,
"grad_norm": 0.09423828125,
"learning_rate": 0.0020729999999999998,
"loss": 1.3324,
"step": 691
},
{
"epoch": 0.060682707867185326,
"grad_norm": 0.1435546875,
"learning_rate": 0.0020759999999999997,
"loss": 1.3587,
"step": 692
},
{
"epoch": 0.06077039964155987,
"grad_norm": 0.15234375,
"learning_rate": 0.0020789999999999997,
"loss": 1.3409,
"step": 693
},
{
"epoch": 0.060858091415934415,
"grad_norm": 0.09716796875,
"learning_rate": 0.002082,
"loss": 1.358,
"step": 694
},
{
"epoch": 0.06094578319030896,
"grad_norm": 0.11767578125,
"learning_rate": 0.002085,
"loss": 1.2721,
"step": 695
},
{
"epoch": 0.06103347496468351,
"grad_norm": 0.1533203125,
"learning_rate": 0.002088,
"loss": 1.3178,
"step": 696
},
{
"epoch": 0.061121166739058055,
"grad_norm": 0.1494140625,
"learning_rate": 0.002091,
"loss": 1.2921,
"step": 697
},
{
"epoch": 0.0612088585134326,
"grad_norm": 0.07958984375,
"learning_rate": 0.002094,
"loss": 1.2661,
"step": 698
},
{
"epoch": 0.061296550287807144,
"grad_norm": 0.1259765625,
"learning_rate": 0.002097,
"loss": 1.3789,
"step": 699
},
{
"epoch": 0.06138424206218169,
"grad_norm": 0.0927734375,
"learning_rate": 0.0021,
"loss": 1.3957,
"step": 700
},
{
"epoch": 0.06147193383655623,
"grad_norm": 0.125,
"learning_rate": 0.002103,
"loss": 1.348,
"step": 701
},
{
"epoch": 0.06155962561093078,
"grad_norm": 0.103515625,
"learning_rate": 0.002106,
"loss": 1.3111,
"step": 702
},
{
"epoch": 0.06164731738530532,
"grad_norm": 0.08251953125,
"learning_rate": 0.0021089999999999998,
"loss": 1.321,
"step": 703
},
{
"epoch": 0.06173500915967987,
"grad_norm": 0.09814453125,
"learning_rate": 0.0021119999999999997,
"loss": 1.4042,
"step": 704
},
{
"epoch": 0.06182270093405442,
"grad_norm": 0.12890625,
"learning_rate": 0.002115,
"loss": 1.2891,
"step": 705
},
{
"epoch": 0.06191039270842896,
"grad_norm": 0.125,
"learning_rate": 0.002118,
"loss": 1.2994,
"step": 706
},
{
"epoch": 0.06199808448280351,
"grad_norm": 0.1640625,
"learning_rate": 0.002121,
"loss": 1.4005,
"step": 707
},
{
"epoch": 0.06208577625717805,
"grad_norm": 0.109375,
"learning_rate": 0.002124,
"loss": 1.3539,
"step": 708
},
{
"epoch": 0.062173468031552596,
"grad_norm": 0.1513671875,
"learning_rate": 0.002127,
"loss": 1.3263,
"step": 709
},
{
"epoch": 0.06226115980592714,
"grad_norm": 0.083984375,
"learning_rate": 0.00213,
"loss": 1.3272,
"step": 710
},
{
"epoch": 0.062348851580301685,
"grad_norm": 0.11865234375,
"learning_rate": 0.002133,
"loss": 1.3914,
"step": 711
},
{
"epoch": 0.06243654335467623,
"grad_norm": 0.0966796875,
"learning_rate": 0.002136,
"loss": 1.3741,
"step": 712
},
{
"epoch": 0.06252423512905078,
"grad_norm": 0.1298828125,
"learning_rate": 0.002139,
"loss": 1.3691,
"step": 713
},
{
"epoch": 0.06261192690342532,
"grad_norm": 0.08984375,
"learning_rate": 0.002142,
"loss": 1.3387,
"step": 714
},
{
"epoch": 0.06269961867779987,
"grad_norm": 0.095703125,
"learning_rate": 0.0021449999999999998,
"loss": 1.3177,
"step": 715
},
{
"epoch": 0.06278731045217441,
"grad_norm": 0.08447265625,
"learning_rate": 0.002148,
"loss": 1.3291,
"step": 716
},
{
"epoch": 0.06287500222654896,
"grad_norm": 0.115234375,
"learning_rate": 0.002151,
"loss": 1.3442,
"step": 717
},
{
"epoch": 0.06296269400092351,
"grad_norm": 0.08154296875,
"learning_rate": 0.002154,
"loss": 1.352,
"step": 718
},
{
"epoch": 0.06305038577529805,
"grad_norm": 0.12255859375,
"learning_rate": 0.002157,
"loss": 1.3209,
"step": 719
},
{
"epoch": 0.0631380775496726,
"grad_norm": 0.080078125,
"learning_rate": 0.00216,
"loss": 1.3168,
"step": 720
},
{
"epoch": 0.06322576932404714,
"grad_norm": 0.12060546875,
"learning_rate": 0.002163,
"loss": 1.3624,
"step": 721
},
{
"epoch": 0.06331346109842169,
"grad_norm": 0.09619140625,
"learning_rate": 0.002166,
"loss": 1.3289,
"step": 722
},
{
"epoch": 0.06340115287279623,
"grad_norm": 0.1416015625,
"learning_rate": 0.002169,
"loss": 1.3062,
"step": 723
},
{
"epoch": 0.06348884464717078,
"grad_norm": 0.09912109375,
"learning_rate": 0.002172,
"loss": 1.2927,
"step": 724
},
{
"epoch": 0.06357653642154532,
"grad_norm": 0.126953125,
"learning_rate": 0.002175,
"loss": 1.3349,
"step": 725
},
{
"epoch": 0.06366422819591987,
"grad_norm": 0.162109375,
"learning_rate": 0.002178,
"loss": 1.4218,
"step": 726
},
{
"epoch": 0.0637519199702944,
"grad_norm": 0.23828125,
"learning_rate": 0.0021809999999999998,
"loss": 1.4004,
"step": 727
},
{
"epoch": 0.06383961174466896,
"grad_norm": 0.1533203125,
"learning_rate": 0.002184,
"loss": 1.3236,
"step": 728
},
{
"epoch": 0.06392730351904351,
"grad_norm": 0.1640625,
"learning_rate": 0.002187,
"loss": 1.2873,
"step": 729
},
{
"epoch": 0.06401499529341804,
"grad_norm": 0.10400390625,
"learning_rate": 0.00219,
"loss": 1.3388,
"step": 730
},
{
"epoch": 0.0641026870677926,
"grad_norm": 0.16796875,
"learning_rate": 0.002193,
"loss": 1.3179,
"step": 731
},
{
"epoch": 0.06419037884216713,
"grad_norm": 0.1318359375,
"learning_rate": 0.002196,
"loss": 1.3483,
"step": 732
},
{
"epoch": 0.06427807061654169,
"grad_norm": 0.1357421875,
"learning_rate": 0.002199,
"loss": 1.3194,
"step": 733
},
{
"epoch": 0.06436576239091622,
"grad_norm": 0.1318359375,
"learning_rate": 0.002202,
"loss": 1.3404,
"step": 734
},
{
"epoch": 0.06445345416529077,
"grad_norm": 0.125,
"learning_rate": 0.002205,
"loss": 1.2977,
"step": 735
},
{
"epoch": 0.06454114593966531,
"grad_norm": 0.1083984375,
"learning_rate": 0.002208,
"loss": 1.362,
"step": 736
},
{
"epoch": 0.06462883771403986,
"grad_norm": 0.09423828125,
"learning_rate": 0.002211,
"loss": 1.3174,
"step": 737
},
{
"epoch": 0.06471652948841441,
"grad_norm": 0.1435546875,
"learning_rate": 0.002214,
"loss": 1.3242,
"step": 738
},
{
"epoch": 0.06480422126278895,
"grad_norm": 0.10888671875,
"learning_rate": 0.0022170000000000002,
"loss": 1.296,
"step": 739
},
{
"epoch": 0.0648919130371635,
"grad_norm": 0.08837890625,
"learning_rate": 0.00222,
"loss": 1.3848,
"step": 740
},
{
"epoch": 0.06497960481153804,
"grad_norm": 0.10693359375,
"learning_rate": 0.002223,
"loss": 1.3586,
"step": 741
},
{
"epoch": 0.06506729658591259,
"grad_norm": 0.09912109375,
"learning_rate": 0.002226,
"loss": 1.2866,
"step": 742
},
{
"epoch": 0.06515498836028713,
"grad_norm": 0.1044921875,
"learning_rate": 0.002229,
"loss": 1.2886,
"step": 743
},
{
"epoch": 0.06524268013466168,
"grad_norm": 0.10009765625,
"learning_rate": 0.002232,
"loss": 1.3764,
"step": 744
},
{
"epoch": 0.06533037190903622,
"grad_norm": 0.08837890625,
"learning_rate": 0.002235,
"loss": 1.3943,
"step": 745
},
{
"epoch": 0.06541806368341077,
"grad_norm": 0.12890625,
"learning_rate": 0.002238,
"loss": 1.3645,
"step": 746
},
{
"epoch": 0.06550575545778532,
"grad_norm": 0.0869140625,
"learning_rate": 0.002241,
"loss": 1.2752,
"step": 747
},
{
"epoch": 0.06559344723215986,
"grad_norm": 0.11474609375,
"learning_rate": 0.002244,
"loss": 1.2695,
"step": 748
},
{
"epoch": 0.06568113900653441,
"grad_norm": 0.1357421875,
"learning_rate": 0.002247,
"loss": 1.3008,
"step": 749
},
{
"epoch": 0.06576883078090895,
"grad_norm": 0.126953125,
"learning_rate": 0.0022500000000000003,
"loss": 1.2855,
"step": 750
},
{
"epoch": 0.0658565225552835,
"grad_norm": 0.09033203125,
"learning_rate": 0.0022530000000000002,
"loss": 1.3534,
"step": 751
},
{
"epoch": 0.06594421432965804,
"grad_norm": 0.08642578125,
"learning_rate": 0.002256,
"loss": 1.3676,
"step": 752
},
{
"epoch": 0.06603190610403259,
"grad_norm": 0.1279296875,
"learning_rate": 0.002259,
"loss": 1.2932,
"step": 753
},
{
"epoch": 0.06611959787840713,
"grad_norm": 0.1298828125,
"learning_rate": 0.002262,
"loss": 1.3035,
"step": 754
},
{
"epoch": 0.06620728965278168,
"grad_norm": 0.103515625,
"learning_rate": 0.002265,
"loss": 1.2848,
"step": 755
},
{
"epoch": 0.06629498142715623,
"grad_norm": 0.126953125,
"learning_rate": 0.002268,
"loss": 1.2856,
"step": 756
},
{
"epoch": 0.06638267320153077,
"grad_norm": 0.109375,
"learning_rate": 0.002271,
"loss": 1.3616,
"step": 757
},
{
"epoch": 0.06647036497590532,
"grad_norm": 0.1259765625,
"learning_rate": 0.002274,
"loss": 1.3047,
"step": 758
},
{
"epoch": 0.06655805675027986,
"grad_norm": 0.1689453125,
"learning_rate": 0.002277,
"loss": 1.4104,
"step": 759
},
{
"epoch": 0.06664574852465441,
"grad_norm": 0.11865234375,
"learning_rate": 0.00228,
"loss": 1.2926,
"step": 760
},
{
"epoch": 0.06673344029902895,
"grad_norm": 0.0986328125,
"learning_rate": 0.002283,
"loss": 1.3173,
"step": 761
},
{
"epoch": 0.0668211320734035,
"grad_norm": 0.1796875,
"learning_rate": 0.0022860000000000003,
"loss": 1.3172,
"step": 762
},
{
"epoch": 0.06690882384777803,
"grad_norm": 0.1201171875,
"learning_rate": 0.0022890000000000002,
"loss": 1.3202,
"step": 763
},
{
"epoch": 0.06699651562215259,
"grad_norm": 0.1435546875,
"learning_rate": 0.002292,
"loss": 1.3341,
"step": 764
},
{
"epoch": 0.06708420739652714,
"grad_norm": 0.1767578125,
"learning_rate": 0.002295,
"loss": 1.3316,
"step": 765
},
{
"epoch": 0.06717189917090167,
"grad_norm": 0.09716796875,
"learning_rate": 0.002298,
"loss": 1.3074,
"step": 766
},
{
"epoch": 0.06725959094527623,
"grad_norm": 0.26171875,
"learning_rate": 0.002301,
"loss": 1.3714,
"step": 767
},
{
"epoch": 0.06734728271965076,
"grad_norm": 0.21484375,
"learning_rate": 0.002304,
"loss": 1.3505,
"step": 768
},
{
"epoch": 0.06743497449402532,
"grad_norm": 0.091796875,
"learning_rate": 0.002307,
"loss": 1.2795,
"step": 769
},
{
"epoch": 0.06752266626839985,
"grad_norm": 0.146484375,
"learning_rate": 0.00231,
"loss": 1.2834,
"step": 770
},
{
"epoch": 0.0676103580427744,
"grad_norm": 0.09619140625,
"learning_rate": 0.002313,
"loss": 1.2901,
"step": 771
},
{
"epoch": 0.06769804981714894,
"grad_norm": 0.11083984375,
"learning_rate": 0.002316,
"loss": 1.2928,
"step": 772
},
{
"epoch": 0.0677857415915235,
"grad_norm": 0.0869140625,
"learning_rate": 0.0023190000000000003,
"loss": 1.3493,
"step": 773
},
{
"epoch": 0.06787343336589804,
"grad_norm": 0.1044921875,
"learning_rate": 0.0023220000000000003,
"loss": 1.3889,
"step": 774
},
{
"epoch": 0.06796112514027258,
"grad_norm": 0.10009765625,
"learning_rate": 0.0023250000000000002,
"loss": 1.2773,
"step": 775
},
{
"epoch": 0.06804881691464713,
"grad_norm": 0.08740234375,
"learning_rate": 0.002328,
"loss": 1.3119,
"step": 776
},
{
"epoch": 0.06813650868902167,
"grad_norm": 0.11376953125,
"learning_rate": 0.002331,
"loss": 1.3411,
"step": 777
},
{
"epoch": 0.06822420046339622,
"grad_norm": 0.0771484375,
"learning_rate": 0.002334,
"loss": 1.3557,
"step": 778
},
{
"epoch": 0.06831189223777076,
"grad_norm": 0.11669921875,
"learning_rate": 0.002337,
"loss": 1.3282,
"step": 779
},
{
"epoch": 0.06839958401214531,
"grad_norm": 0.07666015625,
"learning_rate": 0.00234,
"loss": 1.3127,
"step": 780
},
{
"epoch": 0.06848727578651985,
"grad_norm": 0.119140625,
"learning_rate": 0.002343,
"loss": 1.3505,
"step": 781
},
{
"epoch": 0.0685749675608944,
"grad_norm": 0.08447265625,
"learning_rate": 0.002346,
"loss": 1.368,
"step": 782
},
{
"epoch": 0.06866265933526895,
"grad_norm": 0.1572265625,
"learning_rate": 0.002349,
"loss": 1.3141,
"step": 783
},
{
"epoch": 0.06875035110964349,
"grad_norm": 0.0830078125,
"learning_rate": 0.002352,
"loss": 1.3129,
"step": 784
},
{
"epoch": 0.06883804288401804,
"grad_norm": 0.1240234375,
"learning_rate": 0.0023550000000000003,
"loss": 1.3506,
"step": 785
},
{
"epoch": 0.06892573465839258,
"grad_norm": 0.0888671875,
"learning_rate": 0.0023580000000000003,
"loss": 1.3198,
"step": 786
},
{
"epoch": 0.06901342643276713,
"grad_norm": 0.1162109375,
"learning_rate": 0.0023610000000000003,
"loss": 1.3936,
"step": 787
},
{
"epoch": 0.06910111820714167,
"grad_norm": 0.09326171875,
"learning_rate": 0.002364,
"loss": 1.3044,
"step": 788
},
{
"epoch": 0.06918880998151622,
"grad_norm": 0.13671875,
"learning_rate": 0.002367,
"loss": 1.3279,
"step": 789
},
{
"epoch": 0.06927650175589076,
"grad_norm": 0.134765625,
"learning_rate": 0.00237,
"loss": 1.3941,
"step": 790
},
{
"epoch": 0.06936419353026531,
"grad_norm": 0.08349609375,
"learning_rate": 0.002373,
"loss": 1.3437,
"step": 791
},
{
"epoch": 0.06945188530463986,
"grad_norm": 0.0859375,
"learning_rate": 0.002376,
"loss": 1.4024,
"step": 792
},
{
"epoch": 0.0695395770790144,
"grad_norm": 0.08642578125,
"learning_rate": 0.002379,
"loss": 1.3566,
"step": 793
},
{
"epoch": 0.06962726885338895,
"grad_norm": 0.09814453125,
"learning_rate": 0.002382,
"loss": 1.2291,
"step": 794
},
{
"epoch": 0.06971496062776349,
"grad_norm": 0.0849609375,
"learning_rate": 0.002385,
"loss": 1.3785,
"step": 795
},
{
"epoch": 0.06980265240213804,
"grad_norm": 0.1103515625,
"learning_rate": 0.0023880000000000004,
"loss": 1.3573,
"step": 796
},
{
"epoch": 0.06989034417651258,
"grad_norm": 0.140625,
"learning_rate": 0.0023910000000000003,
"loss": 1.3114,
"step": 797
},
{
"epoch": 0.06997803595088713,
"grad_norm": 0.09228515625,
"learning_rate": 0.0023940000000000003,
"loss": 1.3111,
"step": 798
},
{
"epoch": 0.07006572772526166,
"grad_norm": 0.1728515625,
"learning_rate": 0.0023970000000000003,
"loss": 1.416,
"step": 799
},
{
"epoch": 0.07015341949963622,
"grad_norm": 0.181640625,
"learning_rate": 0.0024000000000000002,
"loss": 1.2953,
"step": 800
},
{
"epoch": 0.07024111127401077,
"grad_norm": 0.150390625,
"learning_rate": 0.002403,
"loss": 1.3164,
"step": 801
},
{
"epoch": 0.0703288030483853,
"grad_norm": 0.205078125,
"learning_rate": 0.002406,
"loss": 1.44,
"step": 802
},
{
"epoch": 0.07041649482275986,
"grad_norm": 0.12158203125,
"learning_rate": 0.002409,
"loss": 1.3285,
"step": 803
},
{
"epoch": 0.0705041865971344,
"grad_norm": 0.11962890625,
"learning_rate": 0.002412,
"loss": 1.2978,
"step": 804
},
{
"epoch": 0.07059187837150895,
"grad_norm": 0.10791015625,
"learning_rate": 0.002415,
"loss": 1.3385,
"step": 805
},
{
"epoch": 0.07067957014588348,
"grad_norm": 0.16796875,
"learning_rate": 0.002418,
"loss": 1.2502,
"step": 806
},
{
"epoch": 0.07076726192025803,
"grad_norm": 0.09619140625,
"learning_rate": 0.0024210000000000004,
"loss": 1.2764,
"step": 807
},
{
"epoch": 0.07085495369463257,
"grad_norm": 0.1884765625,
"learning_rate": 0.0024240000000000004,
"loss": 1.306,
"step": 808
},
{
"epoch": 0.07094264546900712,
"grad_norm": 0.103515625,
"learning_rate": 0.0024270000000000003,
"loss": 1.2523,
"step": 809
},
{
"epoch": 0.07103033724338167,
"grad_norm": 0.251953125,
"learning_rate": 0.0024300000000000003,
"loss": 1.3436,
"step": 810
},
{
"epoch": 0.07111802901775621,
"grad_norm": 0.076171875,
"learning_rate": 0.0024330000000000003,
"loss": 1.329,
"step": 811
},
{
"epoch": 0.07120572079213076,
"grad_norm": 0.2578125,
"learning_rate": 0.0024360000000000002,
"loss": 1.3534,
"step": 812
},
{
"epoch": 0.0712934125665053,
"grad_norm": 0.0869140625,
"learning_rate": 0.0024389999999999998,
"loss": 1.2848,
"step": 813
},
{
"epoch": 0.07138110434087985,
"grad_norm": 0.1787109375,
"learning_rate": 0.0024419999999999997,
"loss": 1.3431,
"step": 814
},
{
"epoch": 0.07146879611525439,
"grad_norm": 0.09716796875,
"learning_rate": 0.0024449999999999997,
"loss": 1.2798,
"step": 815
},
{
"epoch": 0.07155648788962894,
"grad_norm": 0.1640625,
"learning_rate": 0.002448,
"loss": 1.3048,
"step": 816
},
{
"epoch": 0.07164417966400348,
"grad_norm": 0.11865234375,
"learning_rate": 0.002451,
"loss": 1.2944,
"step": 817
},
{
"epoch": 0.07173187143837803,
"grad_norm": 0.296875,
"learning_rate": 0.002454,
"loss": 1.4416,
"step": 818
},
{
"epoch": 0.07181956321275258,
"grad_norm": 0.1767578125,
"learning_rate": 0.002457,
"loss": 1.3192,
"step": 819
},
{
"epoch": 0.07190725498712712,
"grad_norm": 0.2138671875,
"learning_rate": 0.00246,
"loss": 1.3287,
"step": 820
},
{
"epoch": 0.07199494676150167,
"grad_norm": 0.1591796875,
"learning_rate": 0.002463,
"loss": 1.3128,
"step": 821
},
{
"epoch": 0.07208263853587621,
"grad_norm": 0.162109375,
"learning_rate": 0.002466,
"loss": 1.3772,
"step": 822
},
{
"epoch": 0.07217033031025076,
"grad_norm": 0.140625,
"learning_rate": 0.002469,
"loss": 1.3326,
"step": 823
},
{
"epoch": 0.0722580220846253,
"grad_norm": 0.16796875,
"learning_rate": 0.002472,
"loss": 1.3595,
"step": 824
},
{
"epoch": 0.07234571385899985,
"grad_norm": 0.12060546875,
"learning_rate": 0.0024749999999999998,
"loss": 1.3271,
"step": 825
},
{
"epoch": 0.07243340563337439,
"grad_norm": 0.169921875,
"learning_rate": 0.0024779999999999997,
"loss": 1.3399,
"step": 826
},
{
"epoch": 0.07252109740774894,
"grad_norm": 0.142578125,
"learning_rate": 0.002481,
"loss": 1.3322,
"step": 827
},
{
"epoch": 0.07260878918212349,
"grad_norm": 0.20703125,
"learning_rate": 0.002484,
"loss": 1.3868,
"step": 828
},
{
"epoch": 0.07269648095649803,
"grad_norm": 0.1767578125,
"learning_rate": 0.002487,
"loss": 1.3419,
"step": 829
},
{
"epoch": 0.07278417273087258,
"grad_norm": 0.1865234375,
"learning_rate": 0.00249,
"loss": 1.3186,
"step": 830
},
{
"epoch": 0.07287186450524712,
"grad_norm": 0.1640625,
"learning_rate": 0.002493,
"loss": 1.2891,
"step": 831
},
{
"epoch": 0.07295955627962167,
"grad_norm": 0.154296875,
"learning_rate": 0.002496,
"loss": 1.3262,
"step": 832
},
{
"epoch": 0.0730472480539962,
"grad_norm": 0.12451171875,
"learning_rate": 0.002499,
"loss": 1.3283,
"step": 833
},
{
"epoch": 0.07313493982837076,
"grad_norm": 0.10009765625,
"learning_rate": 0.002502,
"loss": 1.3316,
"step": 834
},
{
"epoch": 0.0732226316027453,
"grad_norm": 0.0888671875,
"learning_rate": 0.002505,
"loss": 1.324,
"step": 835
},
{
"epoch": 0.07331032337711985,
"grad_norm": 0.08984375,
"learning_rate": 0.002508,
"loss": 1.3424,
"step": 836
},
{
"epoch": 0.0733980151514944,
"grad_norm": 0.09765625,
"learning_rate": 0.0025109999999999998,
"loss": 1.3264,
"step": 837
},
{
"epoch": 0.07348570692586893,
"grad_norm": 0.08740234375,
"learning_rate": 0.0025139999999999997,
"loss": 1.3647,
"step": 838
},
{
"epoch": 0.07357339870024349,
"grad_norm": 0.10693359375,
"learning_rate": 0.002517,
"loss": 1.3417,
"step": 839
},
{
"epoch": 0.07366109047461802,
"grad_norm": 0.10888671875,
"learning_rate": 0.00252,
"loss": 1.2623,
"step": 840
},
{
"epoch": 0.07374878224899258,
"grad_norm": 0.142578125,
"learning_rate": 0.002523,
"loss": 1.3707,
"step": 841
},
{
"epoch": 0.07383647402336711,
"grad_norm": 0.10400390625,
"learning_rate": 0.002526,
"loss": 1.398,
"step": 842
},
{
"epoch": 0.07392416579774166,
"grad_norm": 0.09912109375,
"learning_rate": 0.002529,
"loss": 1.319,
"step": 843
},
{
"epoch": 0.0740118575721162,
"grad_norm": 0.11962890625,
"learning_rate": 0.002532,
"loss": 1.324,
"step": 844
},
{
"epoch": 0.07409954934649075,
"grad_norm": 0.16015625,
"learning_rate": 0.002535,
"loss": 1.2863,
"step": 845
},
{
"epoch": 0.0741872411208653,
"grad_norm": 0.1396484375,
"learning_rate": 0.002538,
"loss": 1.3849,
"step": 846
},
{
"epoch": 0.07427493289523984,
"grad_norm": 0.12353515625,
"learning_rate": 0.002541,
"loss": 1.3583,
"step": 847
},
{
"epoch": 0.0743626246696144,
"grad_norm": 0.1396484375,
"learning_rate": 0.002544,
"loss": 1.383,
"step": 848
},
{
"epoch": 0.07445031644398893,
"grad_norm": 0.1298828125,
"learning_rate": 0.002547,
"loss": 1.2858,
"step": 849
},
{
"epoch": 0.07453800821836348,
"grad_norm": 0.1904296875,
"learning_rate": 0.00255,
"loss": 1.3304,
"step": 850
},
{
"epoch": 0.07462569999273802,
"grad_norm": 0.1796875,
"learning_rate": 0.002553,
"loss": 1.4514,
"step": 851
},
{
"epoch": 0.07471339176711257,
"grad_norm": 0.177734375,
"learning_rate": 0.002556,
"loss": 1.3373,
"step": 852
},
{
"epoch": 0.07480108354148711,
"grad_norm": 0.1875,
"learning_rate": 0.002559,
"loss": 1.4162,
"step": 853
},
{
"epoch": 0.07488877531586166,
"grad_norm": 0.1484375,
"learning_rate": 0.002562,
"loss": 1.3607,
"step": 854
},
{
"epoch": 0.07497646709023621,
"grad_norm": 0.1435546875,
"learning_rate": 0.002565,
"loss": 1.3378,
"step": 855
},
{
"epoch": 0.07506415886461075,
"grad_norm": 0.1435546875,
"learning_rate": 0.002568,
"loss": 1.3712,
"step": 856
},
{
"epoch": 0.0751518506389853,
"grad_norm": 0.111328125,
"learning_rate": 0.002571,
"loss": 1.3485,
"step": 857
},
{
"epoch": 0.07523954241335984,
"grad_norm": 0.0869140625,
"learning_rate": 0.002574,
"loss": 1.3551,
"step": 858
},
{
"epoch": 0.07532723418773439,
"grad_norm": 0.072265625,
"learning_rate": 0.002577,
"loss": 1.3312,
"step": 859
},
{
"epoch": 0.07541492596210893,
"grad_norm": 0.09912109375,
"learning_rate": 0.00258,
"loss": 1.3454,
"step": 860
},
{
"epoch": 0.07550261773648348,
"grad_norm": 0.103515625,
"learning_rate": 0.0025830000000000002,
"loss": 1.3705,
"step": 861
},
{
"epoch": 0.07559030951085802,
"grad_norm": 0.11181640625,
"learning_rate": 0.002586,
"loss": 1.2941,
"step": 862
},
{
"epoch": 0.07567800128523257,
"grad_norm": 0.0947265625,
"learning_rate": 0.002589,
"loss": 1.3392,
"step": 863
},
{
"epoch": 0.07576569305960712,
"grad_norm": 0.1357421875,
"learning_rate": 0.002592,
"loss": 1.3258,
"step": 864
},
{
"epoch": 0.07585338483398166,
"grad_norm": 0.07568359375,
"learning_rate": 0.002595,
"loss": 1.288,
"step": 865
},
{
"epoch": 0.07594107660835621,
"grad_norm": 0.17578125,
"learning_rate": 0.002598,
"loss": 1.4316,
"step": 866
},
{
"epoch": 0.07602876838273075,
"grad_norm": 0.130859375,
"learning_rate": 0.002601,
"loss": 1.3078,
"step": 867
},
{
"epoch": 0.0761164601571053,
"grad_norm": 0.1552734375,
"learning_rate": 0.002604,
"loss": 1.2932,
"step": 868
},
{
"epoch": 0.07620415193147984,
"grad_norm": 0.1669921875,
"learning_rate": 0.002607,
"loss": 1.3165,
"step": 869
},
{
"epoch": 0.07629184370585439,
"grad_norm": 0.123046875,
"learning_rate": 0.00261,
"loss": 1.3193,
"step": 870
},
{
"epoch": 0.07637953548022892,
"grad_norm": 0.09765625,
"learning_rate": 0.002613,
"loss": 1.3277,
"step": 871
},
{
"epoch": 0.07646722725460348,
"grad_norm": 0.111328125,
"learning_rate": 0.002616,
"loss": 1.2847,
"step": 872
},
{
"epoch": 0.07655491902897803,
"grad_norm": 0.12158203125,
"learning_rate": 0.0026190000000000002,
"loss": 1.335,
"step": 873
},
{
"epoch": 0.07664261080335256,
"grad_norm": 0.09130859375,
"learning_rate": 0.002622,
"loss": 1.2934,
"step": 874
},
{
"epoch": 0.07673030257772712,
"grad_norm": 0.11181640625,
"learning_rate": 0.002625,
"loss": 1.3118,
"step": 875
},
{
"epoch": 0.07681799435210165,
"grad_norm": 0.08837890625,
"learning_rate": 0.002628,
"loss": 1.268,
"step": 876
},
{
"epoch": 0.0769056861264762,
"grad_norm": 0.1630859375,
"learning_rate": 0.002631,
"loss": 1.3718,
"step": 877
},
{
"epoch": 0.07699337790085074,
"grad_norm": 0.087890625,
"learning_rate": 0.002634,
"loss": 1.347,
"step": 878
},
{
"epoch": 0.0770810696752253,
"grad_norm": 0.1025390625,
"learning_rate": 0.002637,
"loss": 1.321,
"step": 879
},
{
"epoch": 0.07716876144959983,
"grad_norm": 0.10498046875,
"learning_rate": 0.00264,
"loss": 1.3698,
"step": 880
},
{
"epoch": 0.07725645322397438,
"grad_norm": 0.1083984375,
"learning_rate": 0.002643,
"loss": 1.3034,
"step": 881
},
{
"epoch": 0.07734414499834893,
"grad_norm": 0.08251953125,
"learning_rate": 0.002646,
"loss": 1.3008,
"step": 882
},
{
"epoch": 0.07743183677272347,
"grad_norm": 0.107421875,
"learning_rate": 0.002649,
"loss": 1.3692,
"step": 883
},
{
"epoch": 0.07751952854709802,
"grad_norm": 0.1044921875,
"learning_rate": 0.0026520000000000003,
"loss": 1.3591,
"step": 884
},
{
"epoch": 0.07760722032147256,
"grad_norm": 0.0732421875,
"learning_rate": 0.0026550000000000002,
"loss": 1.2839,
"step": 885
},
{
"epoch": 0.07769491209584711,
"grad_norm": 0.10107421875,
"learning_rate": 0.002658,
"loss": 1.3315,
"step": 886
},
{
"epoch": 0.07778260387022165,
"grad_norm": 0.10595703125,
"learning_rate": 0.002661,
"loss": 1.306,
"step": 887
},
{
"epoch": 0.0778702956445962,
"grad_norm": 0.103515625,
"learning_rate": 0.002664,
"loss": 1.3655,
"step": 888
},
{
"epoch": 0.07795798741897074,
"grad_norm": 0.0908203125,
"learning_rate": 0.002667,
"loss": 1.3113,
"step": 889
},
{
"epoch": 0.07804567919334529,
"grad_norm": 0.1884765625,
"learning_rate": 0.00267,
"loss": 1.3352,
"step": 890
},
{
"epoch": 0.07813337096771984,
"grad_norm": 0.1689453125,
"learning_rate": 0.002673,
"loss": 1.2859,
"step": 891
},
{
"epoch": 0.07822106274209438,
"grad_norm": 0.10205078125,
"learning_rate": 0.002676,
"loss": 1.3539,
"step": 892
},
{
"epoch": 0.07830875451646893,
"grad_norm": 0.234375,
"learning_rate": 0.002679,
"loss": 1.3543,
"step": 893
},
{
"epoch": 0.07839644629084347,
"grad_norm": 0.2080078125,
"learning_rate": 0.002682,
"loss": 1.3276,
"step": 894
},
{
"epoch": 0.07848413806521802,
"grad_norm": 0.10302734375,
"learning_rate": 0.0026850000000000003,
"loss": 1.3294,
"step": 895
},
{
"epoch": 0.07857182983959256,
"grad_norm": 0.111328125,
"learning_rate": 0.0026880000000000003,
"loss": 1.2468,
"step": 896
},
{
"epoch": 0.07865952161396711,
"grad_norm": 0.1845703125,
"learning_rate": 0.0026910000000000002,
"loss": 1.3238,
"step": 897
},
{
"epoch": 0.07874721338834165,
"grad_norm": 0.1767578125,
"learning_rate": 0.002694,
"loss": 1.3838,
"step": 898
},
{
"epoch": 0.0788349051627162,
"grad_norm": 0.205078125,
"learning_rate": 0.002697,
"loss": 1.3141,
"step": 899
},
{
"epoch": 0.07892259693709075,
"grad_norm": 0.1455078125,
"learning_rate": 0.0027,
"loss": 1.2963,
"step": 900
},
{
"epoch": 0.07901028871146529,
"grad_norm": 0.15234375,
"learning_rate": 0.002703,
"loss": 1.256,
"step": 901
},
{
"epoch": 0.07909798048583984,
"grad_norm": 0.1259765625,
"learning_rate": 0.002706,
"loss": 1.3802,
"step": 902
},
{
"epoch": 0.07918567226021438,
"grad_norm": 0.10693359375,
"learning_rate": 0.002709,
"loss": 1.2523,
"step": 903
},
{
"epoch": 0.07927336403458893,
"grad_norm": 0.11083984375,
"learning_rate": 0.002712,
"loss": 1.2966,
"step": 904
},
{
"epoch": 0.07936105580896347,
"grad_norm": 0.1103515625,
"learning_rate": 0.002715,
"loss": 1.2996,
"step": 905
},
{
"epoch": 0.07944874758333802,
"grad_norm": 0.10986328125,
"learning_rate": 0.002718,
"loss": 1.3653,
"step": 906
},
{
"epoch": 0.07953643935771255,
"grad_norm": 0.1396484375,
"learning_rate": 0.0027210000000000003,
"loss": 1.3168,
"step": 907
},
{
"epoch": 0.0796241311320871,
"grad_norm": 0.095703125,
"learning_rate": 0.0027240000000000003,
"loss": 1.4055,
"step": 908
},
{
"epoch": 0.07971182290646166,
"grad_norm": 0.1357421875,
"learning_rate": 0.0027270000000000003,
"loss": 1.3193,
"step": 909
},
{
"epoch": 0.0797995146808362,
"grad_norm": 0.09912109375,
"learning_rate": 0.0027300000000000002,
"loss": 1.3323,
"step": 910
},
{
"epoch": 0.07988720645521075,
"grad_norm": 0.193359375,
"learning_rate": 0.002733,
"loss": 1.361,
"step": 911
},
{
"epoch": 0.07997489822958528,
"grad_norm": 0.255859375,
"learning_rate": 0.002736,
"loss": 1.3238,
"step": 912
},
{
"epoch": 0.08006259000395984,
"grad_norm": 0.1005859375,
"learning_rate": 0.002739,
"loss": 1.3205,
"step": 913
},
{
"epoch": 0.08015028177833437,
"grad_norm": 0.111328125,
"learning_rate": 0.002742,
"loss": 1.2774,
"step": 914
},
{
"epoch": 0.08023797355270892,
"grad_norm": 0.09521484375,
"learning_rate": 0.002745,
"loss": 1.3301,
"step": 915
},
{
"epoch": 0.08032566532708346,
"grad_norm": 0.09423828125,
"learning_rate": 0.002748,
"loss": 1.3214,
"step": 916
},
{
"epoch": 0.08041335710145801,
"grad_norm": 0.1416015625,
"learning_rate": 0.002751,
"loss": 1.3142,
"step": 917
},
{
"epoch": 0.08050104887583256,
"grad_norm": 0.07470703125,
"learning_rate": 0.0027540000000000004,
"loss": 1.3766,
"step": 918
},
{
"epoch": 0.0805887406502071,
"grad_norm": 0.11376953125,
"learning_rate": 0.0027570000000000003,
"loss": 1.2802,
"step": 919
},
{
"epoch": 0.08067643242458165,
"grad_norm": 0.087890625,
"learning_rate": 0.0027600000000000003,
"loss": 1.3042,
"step": 920
},
{
"epoch": 0.08076412419895619,
"grad_norm": 0.10986328125,
"learning_rate": 0.0027630000000000003,
"loss": 1.3302,
"step": 921
},
{
"epoch": 0.08085181597333074,
"grad_norm": 0.1044921875,
"learning_rate": 0.0027660000000000002,
"loss": 1.3524,
"step": 922
},
{
"epoch": 0.08093950774770528,
"grad_norm": 0.10888671875,
"learning_rate": 0.002769,
"loss": 1.229,
"step": 923
},
{
"epoch": 0.08102719952207983,
"grad_norm": 0.10107421875,
"learning_rate": 0.002772,
"loss": 1.334,
"step": 924
},
{
"epoch": 0.08111489129645437,
"grad_norm": 0.1318359375,
"learning_rate": 0.002775,
"loss": 1.3263,
"step": 925
},
{
"epoch": 0.08120258307082892,
"grad_norm": 0.201171875,
"learning_rate": 0.002778,
"loss": 1.3455,
"step": 926
},
{
"epoch": 0.08129027484520347,
"grad_norm": 0.0849609375,
"learning_rate": 0.002781,
"loss": 1.3536,
"step": 927
},
{
"epoch": 0.08137796661957801,
"grad_norm": 0.11865234375,
"learning_rate": 0.002784,
"loss": 1.3306,
"step": 928
},
{
"epoch": 0.08146565839395256,
"grad_norm": 0.0771484375,
"learning_rate": 0.0027870000000000004,
"loss": 1.3462,
"step": 929
},
{
"epoch": 0.0815533501683271,
"grad_norm": 0.12890625,
"learning_rate": 0.0027900000000000004,
"loss": 1.2984,
"step": 930
},
{
"epoch": 0.08164104194270165,
"grad_norm": 0.10205078125,
"learning_rate": 0.0027930000000000003,
"loss": 1.3302,
"step": 931
},
{
"epoch": 0.08172873371707619,
"grad_norm": 0.1005859375,
"learning_rate": 0.0027960000000000003,
"loss": 1.356,
"step": 932
},
{
"epoch": 0.08181642549145074,
"grad_norm": 0.091796875,
"learning_rate": 0.0027990000000000003,
"loss": 1.3102,
"step": 933
},
{
"epoch": 0.08190411726582528,
"grad_norm": 0.10205078125,
"learning_rate": 0.0028020000000000002,
"loss": 1.4226,
"step": 934
},
{
"epoch": 0.08199180904019983,
"grad_norm": 0.0869140625,
"learning_rate": 0.002805,
"loss": 1.3052,
"step": 935
},
{
"epoch": 0.08207950081457437,
"grad_norm": 0.095703125,
"learning_rate": 0.002808,
"loss": 1.3552,
"step": 936
},
{
"epoch": 0.08216719258894892,
"grad_norm": 0.07958984375,
"learning_rate": 0.002811,
"loss": 1.3334,
"step": 937
},
{
"epoch": 0.08225488436332347,
"grad_norm": 0.15234375,
"learning_rate": 0.002814,
"loss": 1.3238,
"step": 938
},
{
"epoch": 0.082342576137698,
"grad_norm": 0.08154296875,
"learning_rate": 0.002817,
"loss": 1.2994,
"step": 939
},
{
"epoch": 0.08243026791207256,
"grad_norm": 0.12158203125,
"learning_rate": 0.00282,
"loss": 1.2673,
"step": 940
},
{
"epoch": 0.0825179596864471,
"grad_norm": 0.126953125,
"learning_rate": 0.002823,
"loss": 1.2656,
"step": 941
},
{
"epoch": 0.08260565146082165,
"grad_norm": 0.10009765625,
"learning_rate": 0.002826,
"loss": 1.3713,
"step": 942
},
{
"epoch": 0.08269334323519618,
"grad_norm": 0.27734375,
"learning_rate": 0.002829,
"loss": 1.3689,
"step": 943
},
{
"epoch": 0.08278103500957074,
"grad_norm": 0.1533203125,
"learning_rate": 0.002832,
"loss": 1.3078,
"step": 944
},
{
"epoch": 0.08286872678394527,
"grad_norm": 0.201171875,
"learning_rate": 0.002835,
"loss": 1.3463,
"step": 945
},
{
"epoch": 0.08295641855831982,
"grad_norm": 0.2109375,
"learning_rate": 0.002838,
"loss": 1.2728,
"step": 946
},
{
"epoch": 0.08304411033269438,
"grad_norm": 0.10205078125,
"learning_rate": 0.0028409999999999998,
"loss": 1.2866,
"step": 947
},
{
"epoch": 0.08313180210706891,
"grad_norm": 0.09375,
"learning_rate": 0.0028439999999999997,
"loss": 1.3144,
"step": 948
},
{
"epoch": 0.08321949388144347,
"grad_norm": 0.10986328125,
"learning_rate": 0.002847,
"loss": 1.2829,
"step": 949
},
{
"epoch": 0.083307185655818,
"grad_norm": 0.07763671875,
"learning_rate": 0.00285,
"loss": 1.3705,
"step": 950
},
{
"epoch": 0.08339487743019255,
"grad_norm": 0.107421875,
"learning_rate": 0.002853,
"loss": 1.3597,
"step": 951
},
{
"epoch": 0.08348256920456709,
"grad_norm": 0.10791015625,
"learning_rate": 0.002856,
"loss": 1.3528,
"step": 952
},
{
"epoch": 0.08357026097894164,
"grad_norm": 0.126953125,
"learning_rate": 0.002859,
"loss": 1.3411,
"step": 953
},
{
"epoch": 0.08365795275331618,
"grad_norm": 0.1376953125,
"learning_rate": 0.002862,
"loss": 1.3656,
"step": 954
},
{
"epoch": 0.08374564452769073,
"grad_norm": 0.259765625,
"learning_rate": 0.002865,
"loss": 1.3907,
"step": 955
},
{
"epoch": 0.08383333630206528,
"grad_norm": 0.10986328125,
"learning_rate": 0.002868,
"loss": 1.3499,
"step": 956
},
{
"epoch": 0.08392102807643982,
"grad_norm": 0.13671875,
"learning_rate": 0.002871,
"loss": 1.326,
"step": 957
},
{
"epoch": 0.08400871985081437,
"grad_norm": 0.11083984375,
"learning_rate": 0.002874,
"loss": 1.2958,
"step": 958
},
{
"epoch": 0.08409641162518891,
"grad_norm": 0.087890625,
"learning_rate": 0.002877,
"loss": 1.2943,
"step": 959
},
{
"epoch": 0.08418410339956346,
"grad_norm": 0.15625,
"learning_rate": 0.0028799999999999997,
"loss": 1.2968,
"step": 960
},
{
"epoch": 0.084271795173938,
"grad_norm": 0.119140625,
"learning_rate": 0.002883,
"loss": 1.3424,
"step": 961
},
{
"epoch": 0.08435948694831255,
"grad_norm": 0.130859375,
"learning_rate": 0.002886,
"loss": 1.326,
"step": 962
},
{
"epoch": 0.08444717872268709,
"grad_norm": 0.1845703125,
"learning_rate": 0.002889,
"loss": 1.3498,
"step": 963
},
{
"epoch": 0.08453487049706164,
"grad_norm": 0.1787109375,
"learning_rate": 0.002892,
"loss": 1.2791,
"step": 964
},
{
"epoch": 0.08462256227143619,
"grad_norm": 0.0849609375,
"learning_rate": 0.002895,
"loss": 1.3582,
"step": 965
},
{
"epoch": 0.08471025404581073,
"grad_norm": 0.1337890625,
"learning_rate": 0.002898,
"loss": 1.3123,
"step": 966
},
{
"epoch": 0.08479794582018528,
"grad_norm": 0.1689453125,
"learning_rate": 0.002901,
"loss": 1.3389,
"step": 967
},
{
"epoch": 0.08488563759455982,
"grad_norm": 0.111328125,
"learning_rate": 0.002904,
"loss": 1.3073,
"step": 968
},
{
"epoch": 0.08497332936893437,
"grad_norm": 0.388671875,
"learning_rate": 0.002907,
"loss": 1.3919,
"step": 969
},
{
"epoch": 0.08506102114330891,
"grad_norm": 0.255859375,
"learning_rate": 0.00291,
"loss": 1.3619,
"step": 970
},
{
"epoch": 0.08514871291768346,
"grad_norm": 0.1748046875,
"learning_rate": 0.002913,
"loss": 1.2457,
"step": 971
},
{
"epoch": 0.085236404692058,
"grad_norm": 0.140625,
"learning_rate": 0.002916,
"loss": 1.3531,
"step": 972
},
{
"epoch": 0.08532409646643255,
"grad_norm": 0.19921875,
"learning_rate": 0.002919,
"loss": 1.3493,
"step": 973
},
{
"epoch": 0.0854117882408071,
"grad_norm": 0.1865234375,
"learning_rate": 0.002922,
"loss": 1.3564,
"step": 974
},
{
"epoch": 0.08549948001518164,
"grad_norm": 0.1572265625,
"learning_rate": 0.002925,
"loss": 1.3556,
"step": 975
},
{
"epoch": 0.08558717178955619,
"grad_norm": 0.10791015625,
"learning_rate": 0.002928,
"loss": 1.3064,
"step": 976
},
{
"epoch": 0.08567486356393073,
"grad_norm": 0.181640625,
"learning_rate": 0.002931,
"loss": 1.3248,
"step": 977
},
{
"epoch": 0.08576255533830528,
"grad_norm": 0.09619140625,
"learning_rate": 0.002934,
"loss": 1.2805,
"step": 978
},
{
"epoch": 0.08585024711267981,
"grad_norm": 0.138671875,
"learning_rate": 0.002937,
"loss": 1.3346,
"step": 979
},
{
"epoch": 0.08593793888705437,
"grad_norm": 0.08740234375,
"learning_rate": 0.00294,
"loss": 1.2495,
"step": 980
},
{
"epoch": 0.0860256306614289,
"grad_norm": 0.10693359375,
"learning_rate": 0.002943,
"loss": 1.3576,
"step": 981
},
{
"epoch": 0.08611332243580345,
"grad_norm": 0.0751953125,
"learning_rate": 0.002946,
"loss": 1.2991,
"step": 982
},
{
"epoch": 0.086201014210178,
"grad_norm": 0.08935546875,
"learning_rate": 0.0029490000000000002,
"loss": 1.4072,
"step": 983
},
{
"epoch": 0.08628870598455254,
"grad_norm": 0.10546875,
"learning_rate": 0.002952,
"loss": 1.3707,
"step": 984
},
{
"epoch": 0.0863763977589271,
"grad_norm": 0.095703125,
"learning_rate": 0.002955,
"loss": 1.3455,
"step": 985
},
{
"epoch": 0.08646408953330163,
"grad_norm": 0.099609375,
"learning_rate": 0.002958,
"loss": 1.3252,
"step": 986
},
{
"epoch": 0.08655178130767618,
"grad_norm": 0.08935546875,
"learning_rate": 0.002961,
"loss": 1.3285,
"step": 987
},
{
"epoch": 0.08663947308205072,
"grad_norm": 0.1328125,
"learning_rate": 0.002964,
"loss": 1.4067,
"step": 988
},
{
"epoch": 0.08672716485642527,
"grad_norm": 0.10009765625,
"learning_rate": 0.002967,
"loss": 1.321,
"step": 989
},
{
"epoch": 0.08681485663079981,
"grad_norm": 0.111328125,
"learning_rate": 0.00297,
"loss": 1.3814,
"step": 990
},
{
"epoch": 0.08690254840517436,
"grad_norm": 0.1953125,
"learning_rate": 0.002973,
"loss": 1.3265,
"step": 991
},
{
"epoch": 0.08699024017954891,
"grad_norm": 0.1865234375,
"learning_rate": 0.002976,
"loss": 1.4072,
"step": 992
},
{
"epoch": 0.08707793195392345,
"grad_norm": 0.10302734375,
"learning_rate": 0.002979,
"loss": 1.3561,
"step": 993
},
{
"epoch": 0.087165623728298,
"grad_norm": 0.08349609375,
"learning_rate": 0.002982,
"loss": 1.3047,
"step": 994
},
{
"epoch": 0.08725331550267254,
"grad_norm": 0.0888671875,
"learning_rate": 0.0029850000000000002,
"loss": 1.2992,
"step": 995
},
{
"epoch": 0.08734100727704709,
"grad_norm": 0.11572265625,
"learning_rate": 0.002988,
"loss": 1.3093,
"step": 996
},
{
"epoch": 0.08742869905142163,
"grad_norm": 0.08984375,
"learning_rate": 0.002991,
"loss": 1.3291,
"step": 997
},
{
"epoch": 0.08751639082579618,
"grad_norm": 0.1484375,
"learning_rate": 0.002994,
"loss": 1.4177,
"step": 998
},
{
"epoch": 0.08760408260017072,
"grad_norm": 0.1513671875,
"learning_rate": 0.002997,
"loss": 1.2768,
"step": 999
},
{
"epoch": 0.08769177437454527,
"grad_norm": 0.10205078125,
"learning_rate": 0.003,
"loss": 1.3108,
"step": 1000
},
{
"epoch": 0.08769177437454527,
"eval_loss": 1.3424164056777954,
"eval_runtime": 429.1223,
"eval_samples_per_second": 33.666,
"eval_steps_per_second": 8.417,
"step": 1000
}
],
"logging_steps": 1.0,
"max_steps": 11403,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.948783792128e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}