{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.663923182441701,
"eval_steps": 500,
"global_step": 242,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027434842249657062,
"grad_norm": 1.902702808380127,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.0959,
"step": 1
},
{
"epoch": 0.0054869684499314125,
"grad_norm": 1.8936893939971924,
"learning_rate": 4.000000000000001e-06,
"loss": 2.099,
"step": 2
},
{
"epoch": 0.00823045267489712,
"grad_norm": 1.9235605001449585,
"learning_rate": 6e-06,
"loss": 2.1045,
"step": 3
},
{
"epoch": 0.010973936899862825,
"grad_norm": 1.7873843908309937,
"learning_rate": 8.000000000000001e-06,
"loss": 2.0144,
"step": 4
},
{
"epoch": 0.013717421124828532,
"grad_norm": 1.8321126699447632,
"learning_rate": 1e-05,
"loss": 2.0872,
"step": 5
},
{
"epoch": 0.01646090534979424,
"grad_norm": 2.0333194732666016,
"learning_rate": 1.2e-05,
"loss": 2.1146,
"step": 6
},
{
"epoch": 0.019204389574759947,
"grad_norm": 1.763102650642395,
"learning_rate": 1.4000000000000001e-05,
"loss": 2.036,
"step": 7
},
{
"epoch": 0.02194787379972565,
"grad_norm": 1.5287824869155884,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.984,
"step": 8
},
{
"epoch": 0.024691358024691357,
"grad_norm": 1.2035481929779053,
"learning_rate": 1.8e-05,
"loss": 1.928,
"step": 9
},
{
"epoch": 0.027434842249657063,
"grad_norm": 1.0602883100509644,
"learning_rate": 2e-05,
"loss": 1.9322,
"step": 10
},
{
"epoch": 0.03017832647462277,
"grad_norm": 0.9456723928451538,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.9257,
"step": 11
},
{
"epoch": 0.03292181069958848,
"grad_norm": 0.7646191120147705,
"learning_rate": 2.4e-05,
"loss": 1.8057,
"step": 12
},
{
"epoch": 0.03566529492455418,
"grad_norm": 0.6718866229057312,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.8535,
"step": 13
},
{
"epoch": 0.038408779149519894,
"grad_norm": 0.5136308073997498,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.8533,
"step": 14
},
{
"epoch": 0.0411522633744856,
"grad_norm": 0.4066430628299713,
"learning_rate": 3e-05,
"loss": 1.7867,
"step": 15
},
{
"epoch": 0.0438957475994513,
"grad_norm": 0.3591379225254059,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.8729,
"step": 16
},
{
"epoch": 0.04663923182441701,
"grad_norm": 0.3689568340778351,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.8186,
"step": 17
},
{
"epoch": 0.04938271604938271,
"grad_norm": 0.3991197645664215,
"learning_rate": 3.6e-05,
"loss": 1.8485,
"step": 18
},
{
"epoch": 0.05212620027434842,
"grad_norm": 0.3603827655315399,
"learning_rate": 3.8e-05,
"loss": 1.8081,
"step": 19
},
{
"epoch": 0.05486968449931413,
"grad_norm": 0.3805089592933655,
"learning_rate": 4e-05,
"loss": 1.7553,
"step": 20
},
{
"epoch": 0.05761316872427984,
"grad_norm": 0.42597419023513794,
"learning_rate": 4.2e-05,
"loss": 1.8115,
"step": 21
},
{
"epoch": 0.06035665294924554,
"grad_norm": 0.37801623344421387,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.7161,
"step": 22
},
{
"epoch": 0.06310013717421124,
"grad_norm": 0.35600143671035767,
"learning_rate": 4.600000000000001e-05,
"loss": 1.7315,
"step": 23
},
{
"epoch": 0.06584362139917696,
"grad_norm": 0.4111214280128479,
"learning_rate": 4.8e-05,
"loss": 1.7091,
"step": 24
},
{
"epoch": 0.06858710562414266,
"grad_norm": 0.4117395579814911,
"learning_rate": 5e-05,
"loss": 1.7026,
"step": 25
},
{
"epoch": 0.07133058984910837,
"grad_norm": 0.4069993197917938,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.6733,
"step": 26
},
{
"epoch": 0.07407407407407407,
"grad_norm": 0.4196024239063263,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.6999,
"step": 27
},
{
"epoch": 0.07681755829903979,
"grad_norm": 0.45915013551712036,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.6613,
"step": 28
},
{
"epoch": 0.07956104252400549,
"grad_norm": 0.44673749804496765,
"learning_rate": 5.8e-05,
"loss": 1.7039,
"step": 29
},
{
"epoch": 0.0823045267489712,
"grad_norm": 0.5381506085395813,
"learning_rate": 6e-05,
"loss": 1.5768,
"step": 30
},
{
"epoch": 0.0850480109739369,
"grad_norm": 0.4311385750770569,
"learning_rate": 6.2e-05,
"loss": 1.5598,
"step": 31
},
{
"epoch": 0.0877914951989026,
"grad_norm": 0.38209667801856995,
"learning_rate": 6.400000000000001e-05,
"loss": 1.511,
"step": 32
},
{
"epoch": 0.09053497942386832,
"grad_norm": 0.35374918580055237,
"learning_rate": 6.6e-05,
"loss": 1.4692,
"step": 33
},
{
"epoch": 0.09327846364883402,
"grad_norm": 0.4020269215106964,
"learning_rate": 6.800000000000001e-05,
"loss": 1.5276,
"step": 34
},
{
"epoch": 0.09602194787379972,
"grad_norm": 0.46055856347084045,
"learning_rate": 7e-05,
"loss": 1.4634,
"step": 35
},
{
"epoch": 0.09876543209876543,
"grad_norm": 0.4301123023033142,
"learning_rate": 7.2e-05,
"loss": 1.4151,
"step": 36
},
{
"epoch": 0.10150891632373114,
"grad_norm": 0.34214499592781067,
"learning_rate": 7.4e-05,
"loss": 1.4451,
"step": 37
},
{
"epoch": 0.10425240054869685,
"grad_norm": 0.24707239866256714,
"learning_rate": 7.6e-05,
"loss": 1.3434,
"step": 38
},
{
"epoch": 0.10699588477366255,
"grad_norm": 0.16371050477027893,
"learning_rate": 7.800000000000001e-05,
"loss": 1.4134,
"step": 39
},
{
"epoch": 0.10973936899862825,
"grad_norm": 0.11528003960847855,
"learning_rate": 8e-05,
"loss": 1.4896,
"step": 40
},
{
"epoch": 0.11248285322359397,
"grad_norm": 0.11622235178947449,
"learning_rate": 8.2e-05,
"loss": 1.3957,
"step": 41
},
{
"epoch": 0.11522633744855967,
"grad_norm": 0.10983709245920181,
"learning_rate": 8.4e-05,
"loss": 1.421,
"step": 42
},
{
"epoch": 0.11796982167352538,
"grad_norm": 0.10124485194683075,
"learning_rate": 8.6e-05,
"loss": 1.4127,
"step": 43
},
{
"epoch": 0.12071330589849108,
"grad_norm": 0.10291855037212372,
"learning_rate": 8.800000000000001e-05,
"loss": 1.4187,
"step": 44
},
{
"epoch": 0.12345679012345678,
"grad_norm": 0.10925430059432983,
"learning_rate": 9e-05,
"loss": 1.3476,
"step": 45
},
{
"epoch": 0.1262002743484225,
"grad_norm": 0.10825473070144653,
"learning_rate": 9.200000000000001e-05,
"loss": 1.4427,
"step": 46
},
{
"epoch": 0.1289437585733882,
"grad_norm": 0.10768264532089233,
"learning_rate": 9.4e-05,
"loss": 1.4292,
"step": 47
},
{
"epoch": 0.13168724279835392,
"grad_norm": 0.11937709152698517,
"learning_rate": 9.6e-05,
"loss": 1.3234,
"step": 48
},
{
"epoch": 0.13443072702331962,
"grad_norm": 0.11960555613040924,
"learning_rate": 9.8e-05,
"loss": 1.3566,
"step": 49
},
{
"epoch": 0.13717421124828533,
"grad_norm": 0.12491138279438019,
"learning_rate": 0.0001,
"loss": 1.4201,
"step": 50
},
{
"epoch": 0.13991769547325103,
"grad_norm": 0.11906778067350388,
"learning_rate": 0.00010200000000000001,
"loss": 1.3745,
"step": 51
},
{
"epoch": 0.14266117969821673,
"grad_norm": 0.12701214849948883,
"learning_rate": 0.00010400000000000001,
"loss": 1.3592,
"step": 52
},
{
"epoch": 0.14540466392318244,
"grad_norm": 0.14920316636562347,
"learning_rate": 0.00010600000000000002,
"loss": 1.3139,
"step": 53
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.157944455742836,
"learning_rate": 0.00010800000000000001,
"loss": 1.3704,
"step": 54
},
{
"epoch": 0.15089163237311384,
"grad_norm": 0.15193891525268555,
"learning_rate": 0.00011000000000000002,
"loss": 1.3694,
"step": 55
},
{
"epoch": 0.15363511659807957,
"grad_norm": 0.15829141438007355,
"learning_rate": 0.00011200000000000001,
"loss": 1.3351,
"step": 56
},
{
"epoch": 0.15637860082304528,
"grad_norm": 0.16939309239387512,
"learning_rate": 0.00011399999999999999,
"loss": 1.3487,
"step": 57
},
{
"epoch": 0.15912208504801098,
"grad_norm": 0.19683969020843506,
"learning_rate": 0.000116,
"loss": 1.2837,
"step": 58
},
{
"epoch": 0.16186556927297668,
"grad_norm": 0.17148783802986145,
"learning_rate": 0.000118,
"loss": 1.3372,
"step": 59
},
{
"epoch": 0.1646090534979424,
"grad_norm": 0.17560726404190063,
"learning_rate": 0.00012,
"loss": 1.3896,
"step": 60
},
{
"epoch": 0.1673525377229081,
"grad_norm": 0.17536282539367676,
"learning_rate": 0.000122,
"loss": 1.4028,
"step": 61
},
{
"epoch": 0.1700960219478738,
"grad_norm": 0.16491512954235077,
"learning_rate": 0.000124,
"loss": 1.3366,
"step": 62
},
{
"epoch": 0.1728395061728395,
"grad_norm": 0.1202845573425293,
"learning_rate": 0.000126,
"loss": 1.3201,
"step": 63
},
{
"epoch": 0.1755829903978052,
"grad_norm": 0.10146432369947433,
"learning_rate": 0.00012800000000000002,
"loss": 1.3083,
"step": 64
},
{
"epoch": 0.17832647462277093,
"grad_norm": 0.0989551916718483,
"learning_rate": 0.00013000000000000002,
"loss": 1.3216,
"step": 65
},
{
"epoch": 0.18106995884773663,
"grad_norm": 0.09368593990802765,
"learning_rate": 0.000132,
"loss": 1.3409,
"step": 66
},
{
"epoch": 0.18381344307270234,
"grad_norm": 0.09617207944393158,
"learning_rate": 0.000134,
"loss": 1.3192,
"step": 67
},
{
"epoch": 0.18655692729766804,
"grad_norm": 0.08890332281589508,
"learning_rate": 0.00013600000000000003,
"loss": 1.354,
"step": 68
},
{
"epoch": 0.18930041152263374,
"grad_norm": 0.11371646821498871,
"learning_rate": 0.000138,
"loss": 1.3201,
"step": 69
},
{
"epoch": 0.19204389574759945,
"grad_norm": 0.09785107523202896,
"learning_rate": 0.00014,
"loss": 1.2413,
"step": 70
},
{
"epoch": 0.19478737997256515,
"grad_norm": 0.09149904549121857,
"learning_rate": 0.000142,
"loss": 1.2548,
"step": 71
},
{
"epoch": 0.19753086419753085,
"grad_norm": 0.08837990462779999,
"learning_rate": 0.000144,
"loss": 1.3014,
"step": 72
},
{
"epoch": 0.20027434842249658,
"grad_norm": 0.08963413536548615,
"learning_rate": 0.000146,
"loss": 1.3128,
"step": 73
},
{
"epoch": 0.2030178326474623,
"grad_norm": 0.08815225213766098,
"learning_rate": 0.000148,
"loss": 1.3321,
"step": 74
},
{
"epoch": 0.205761316872428,
"grad_norm": 0.09394700825214386,
"learning_rate": 0.00015000000000000001,
"loss": 1.3341,
"step": 75
},
{
"epoch": 0.2085048010973937,
"grad_norm": 0.10041660070419312,
"learning_rate": 0.000152,
"loss": 1.2944,
"step": 76
},
{
"epoch": 0.2112482853223594,
"grad_norm": 0.09344102442264557,
"learning_rate": 0.000154,
"loss": 1.3226,
"step": 77
},
{
"epoch": 0.2139917695473251,
"grad_norm": 0.09259933233261108,
"learning_rate": 0.00015600000000000002,
"loss": 1.2942,
"step": 78
},
{
"epoch": 0.2167352537722908,
"grad_norm": 0.09426167607307434,
"learning_rate": 0.00015800000000000002,
"loss": 1.333,
"step": 79
},
{
"epoch": 0.2194787379972565,
"grad_norm": 0.09674811363220215,
"learning_rate": 0.00016,
"loss": 1.3242,
"step": 80
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.09802138805389404,
"learning_rate": 0.000162,
"loss": 1.3322,
"step": 81
},
{
"epoch": 0.22496570644718794,
"grad_norm": 0.09528470784425735,
"learning_rate": 0.000164,
"loss": 1.2384,
"step": 82
},
{
"epoch": 0.22770919067215364,
"grad_norm": 0.0997876301407814,
"learning_rate": 0.000166,
"loss": 1.3646,
"step": 83
},
{
"epoch": 0.23045267489711935,
"grad_norm": 0.09820306301116943,
"learning_rate": 0.000168,
"loss": 1.1982,
"step": 84
},
{
"epoch": 0.23319615912208505,
"grad_norm": 0.10137173533439636,
"learning_rate": 0.00017,
"loss": 1.2697,
"step": 85
},
{
"epoch": 0.23593964334705075,
"grad_norm": 0.09000196307897568,
"learning_rate": 0.000172,
"loss": 1.3122,
"step": 86
},
{
"epoch": 0.23868312757201646,
"grad_norm": 0.12272510677576065,
"learning_rate": 0.000174,
"loss": 1.3238,
"step": 87
},
{
"epoch": 0.24142661179698216,
"grad_norm": 0.10101604461669922,
"learning_rate": 0.00017600000000000002,
"loss": 1.3637,
"step": 88
},
{
"epoch": 0.24417009602194786,
"grad_norm": 0.10622192174196243,
"learning_rate": 0.00017800000000000002,
"loss": 1.2577,
"step": 89
},
{
"epoch": 0.24691358024691357,
"grad_norm": 0.10139253735542297,
"learning_rate": 0.00018,
"loss": 1.303,
"step": 90
},
{
"epoch": 0.2496570644718793,
"grad_norm": 0.09918926656246185,
"learning_rate": 0.000182,
"loss": 1.2377,
"step": 91
},
{
"epoch": 0.252400548696845,
"grad_norm": 0.11826573312282562,
"learning_rate": 0.00018400000000000003,
"loss": 1.3773,
"step": 92
},
{
"epoch": 0.2551440329218107,
"grad_norm": 0.10880250483751297,
"learning_rate": 0.00018600000000000002,
"loss": 1.3056,
"step": 93
},
{
"epoch": 0.2578875171467764,
"grad_norm": 0.1100454330444336,
"learning_rate": 0.000188,
"loss": 1.258,
"step": 94
},
{
"epoch": 0.2606310013717421,
"grad_norm": 0.1200387105345726,
"learning_rate": 0.00019,
"loss": 1.3362,
"step": 95
},
{
"epoch": 0.26337448559670784,
"grad_norm": 0.10868213325738907,
"learning_rate": 0.000192,
"loss": 1.3175,
"step": 96
},
{
"epoch": 0.2661179698216735,
"grad_norm": 0.11683013290166855,
"learning_rate": 0.000194,
"loss": 1.301,
"step": 97
},
{
"epoch": 0.26886145404663925,
"grad_norm": 0.11990080773830414,
"learning_rate": 0.000196,
"loss": 1.2372,
"step": 98
},
{
"epoch": 0.2716049382716049,
"grad_norm": 0.1380293071269989,
"learning_rate": 0.00019800000000000002,
"loss": 1.2915,
"step": 99
},
{
"epoch": 0.27434842249657065,
"grad_norm": 0.14780984818935394,
"learning_rate": 0.0002,
"loss": 1.2423,
"step": 100
},
{
"epoch": 0.27709190672153633,
"grad_norm": 0.11625576764345169,
"learning_rate": 0.00019924242424242426,
"loss": 1.2613,
"step": 101
},
{
"epoch": 0.27983539094650206,
"grad_norm": 0.16299262642860413,
"learning_rate": 0.0001984848484848485,
"loss": 1.2527,
"step": 102
},
{
"epoch": 0.2825788751714678,
"grad_norm": 0.12003400921821594,
"learning_rate": 0.00019772727272727273,
"loss": 1.2459,
"step": 103
},
{
"epoch": 0.28532235939643347,
"grad_norm": 0.11611975729465485,
"learning_rate": 0.00019696969696969698,
"loss": 1.2736,
"step": 104
},
{
"epoch": 0.2880658436213992,
"grad_norm": 0.12332016229629517,
"learning_rate": 0.00019621212121212123,
"loss": 1.2161,
"step": 105
},
{
"epoch": 0.2908093278463649,
"grad_norm": 0.12996245920658112,
"learning_rate": 0.00019545454545454548,
"loss": 1.2254,
"step": 106
},
{
"epoch": 0.2935528120713306,
"grad_norm": 0.11514672636985779,
"learning_rate": 0.0001946969696969697,
"loss": 1.2367,
"step": 107
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.13371898233890533,
"learning_rate": 0.00019393939393939395,
"loss": 1.2172,
"step": 108
},
{
"epoch": 0.299039780521262,
"grad_norm": 0.1204146221280098,
"learning_rate": 0.0001931818181818182,
"loss": 1.3121,
"step": 109
},
{
"epoch": 0.3017832647462277,
"grad_norm": 0.11520334333181381,
"learning_rate": 0.00019242424242424245,
"loss": 1.2051,
"step": 110
},
{
"epoch": 0.3045267489711934,
"grad_norm": 0.15397988259792328,
"learning_rate": 0.00019166666666666667,
"loss": 1.2829,
"step": 111
},
{
"epoch": 0.30727023319615915,
"grad_norm": 0.12115694582462311,
"learning_rate": 0.00019090909090909092,
"loss": 1.3109,
"step": 112
},
{
"epoch": 0.3100137174211248,
"grad_norm": 0.12799453735351562,
"learning_rate": 0.00019015151515151517,
"loss": 1.2836,
"step": 113
},
{
"epoch": 0.31275720164609055,
"grad_norm": 0.1297687143087387,
"learning_rate": 0.00018939393939393942,
"loss": 1.2813,
"step": 114
},
{
"epoch": 0.31550068587105623,
"grad_norm": 0.13718412816524506,
"learning_rate": 0.00018863636363636364,
"loss": 1.3138,
"step": 115
},
{
"epoch": 0.31824417009602196,
"grad_norm": 0.12237361073493958,
"learning_rate": 0.0001878787878787879,
"loss": 1.1938,
"step": 116
},
{
"epoch": 0.32098765432098764,
"grad_norm": 0.12502606213092804,
"learning_rate": 0.00018712121212121212,
"loss": 1.3258,
"step": 117
},
{
"epoch": 0.32373113854595337,
"grad_norm": 0.13772045075893402,
"learning_rate": 0.00018636363636363636,
"loss": 1.3305,
"step": 118
},
{
"epoch": 0.32647462277091904,
"grad_norm": 0.1165667474269867,
"learning_rate": 0.00018560606060606061,
"loss": 1.2473,
"step": 119
},
{
"epoch": 0.3292181069958848,
"grad_norm": 0.12208285182714462,
"learning_rate": 0.00018484848484848484,
"loss": 1.2755,
"step": 120
},
{
"epoch": 0.3319615912208505,
"grad_norm": 0.13750925660133362,
"learning_rate": 0.00018409090909090909,
"loss": 1.178,
"step": 121
},
{
"epoch": 0.3347050754458162,
"grad_norm": 0.12554235756397247,
"learning_rate": 0.00018333333333333334,
"loss": 1.2727,
"step": 122
},
{
"epoch": 0.3374485596707819,
"grad_norm": 0.153937429189682,
"learning_rate": 0.00018257575757575758,
"loss": 1.2232,
"step": 123
},
{
"epoch": 0.3401920438957476,
"grad_norm": 0.12629558145999908,
"learning_rate": 0.00018181818181818183,
"loss": 1.1856,
"step": 124
},
{
"epoch": 0.3429355281207133,
"grad_norm": 0.13697242736816406,
"learning_rate": 0.00018106060606060606,
"loss": 1.3235,
"step": 125
},
{
"epoch": 0.345679012345679,
"grad_norm": 0.15304319560527802,
"learning_rate": 0.0001803030303030303,
"loss": 1.2457,
"step": 126
},
{
"epoch": 0.3484224965706447,
"grad_norm": 0.1399148851633072,
"learning_rate": 0.00017954545454545456,
"loss": 1.2907,
"step": 127
},
{
"epoch": 0.3511659807956104,
"grad_norm": 0.1822618842124939,
"learning_rate": 0.0001787878787878788,
"loss": 1.3335,
"step": 128
},
{
"epoch": 0.35390946502057613,
"grad_norm": 0.16444697976112366,
"learning_rate": 0.00017803030303030303,
"loss": 1.3234,
"step": 129
},
{
"epoch": 0.35665294924554186,
"grad_norm": 0.15233512222766876,
"learning_rate": 0.00017727272727272728,
"loss": 1.2672,
"step": 130
},
{
"epoch": 0.35939643347050754,
"grad_norm": 0.16545739769935608,
"learning_rate": 0.00017651515151515153,
"loss": 1.175,
"step": 131
},
{
"epoch": 0.36213991769547327,
"grad_norm": 0.13627974689006805,
"learning_rate": 0.00017575757575757578,
"loss": 1.2611,
"step": 132
},
{
"epoch": 0.36488340192043894,
"grad_norm": 0.1535719484090805,
"learning_rate": 0.000175,
"loss": 1.2334,
"step": 133
},
{
"epoch": 0.3676268861454047,
"grad_norm": 0.1347316950559616,
"learning_rate": 0.00017424242424242425,
"loss": 1.2259,
"step": 134
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.1347058117389679,
"learning_rate": 0.0001734848484848485,
"loss": 1.135,
"step": 135
},
{
"epoch": 0.3731138545953361,
"grad_norm": 0.1388654112815857,
"learning_rate": 0.00017272727272727275,
"loss": 1.2217,
"step": 136
},
{
"epoch": 0.37585733882030176,
"grad_norm": 0.14038826525211334,
"learning_rate": 0.00017196969696969697,
"loss": 1.2837,
"step": 137
},
{
"epoch": 0.3786008230452675,
"grad_norm": 0.1578507125377655,
"learning_rate": 0.00017121212121212122,
"loss": 1.2717,
"step": 138
},
{
"epoch": 0.3813443072702332,
"grad_norm": 0.13014937937259674,
"learning_rate": 0.00017045454545454547,
"loss": 1.2604,
"step": 139
},
{
"epoch": 0.3840877914951989,
"grad_norm": 0.16863836348056793,
"learning_rate": 0.00016969696969696972,
"loss": 1.2479,
"step": 140
},
{
"epoch": 0.3868312757201646,
"grad_norm": 0.1496235728263855,
"learning_rate": 0.00016893939393939394,
"loss": 1.1911,
"step": 141
},
{
"epoch": 0.3895747599451303,
"grad_norm": 0.15035606920719147,
"learning_rate": 0.0001681818181818182,
"loss": 1.2756,
"step": 142
},
{
"epoch": 0.39231824417009603,
"grad_norm": 0.14892420172691345,
"learning_rate": 0.00016742424242424244,
"loss": 1.199,
"step": 143
},
{
"epoch": 0.3950617283950617,
"grad_norm": 0.1547059863805771,
"learning_rate": 0.0001666666666666667,
"loss": 1.2301,
"step": 144
},
{
"epoch": 0.39780521262002744,
"grad_norm": 0.13619515299797058,
"learning_rate": 0.00016590909090909094,
"loss": 1.1821,
"step": 145
},
{
"epoch": 0.40054869684499317,
"grad_norm": 0.15868829190731049,
"learning_rate": 0.00016515151515151516,
"loss": 1.2416,
"step": 146
},
{
"epoch": 0.40329218106995884,
"grad_norm": 0.13451522588729858,
"learning_rate": 0.0001643939393939394,
"loss": 1.2108,
"step": 147
},
{
"epoch": 0.4060356652949246,
"grad_norm": 0.14629404246807098,
"learning_rate": 0.00016363636363636366,
"loss": 1.2176,
"step": 148
},
{
"epoch": 0.40877914951989025,
"grad_norm": 0.14986877143383026,
"learning_rate": 0.0001628787878787879,
"loss": 1.2373,
"step": 149
},
{
"epoch": 0.411522633744856,
"grad_norm": 0.14896810054779053,
"learning_rate": 0.00016212121212121213,
"loss": 1.2124,
"step": 150
},
{
"epoch": 0.41426611796982166,
"grad_norm": 0.1467796117067337,
"learning_rate": 0.00016136363636363635,
"loss": 1.3202,
"step": 151
},
{
"epoch": 0.4170096021947874,
"grad_norm": 0.1488669067621231,
"learning_rate": 0.0001606060606060606,
"loss": 1.2315,
"step": 152
},
{
"epoch": 0.41975308641975306,
"grad_norm": 0.14375552535057068,
"learning_rate": 0.00015984848484848485,
"loss": 1.2641,
"step": 153
},
{
"epoch": 0.4224965706447188,
"grad_norm": 0.1794748306274414,
"learning_rate": 0.0001590909090909091,
"loss": 1.1754,
"step": 154
},
{
"epoch": 0.4252400548696845,
"grad_norm": 0.13613805174827576,
"learning_rate": 0.00015833333333333332,
"loss": 1.2114,
"step": 155
},
{
"epoch": 0.4279835390946502,
"grad_norm": 0.1772301197052002,
"learning_rate": 0.00015757575757575757,
"loss": 1.2146,
"step": 156
},
{
"epoch": 0.43072702331961593,
"grad_norm": 0.18257774412631989,
"learning_rate": 0.00015681818181818182,
"loss": 1.2129,
"step": 157
},
{
"epoch": 0.4334705075445816,
"grad_norm": 0.1565171182155609,
"learning_rate": 0.00015606060606060607,
"loss": 1.3144,
"step": 158
},
{
"epoch": 0.43621399176954734,
"grad_norm": 0.14858128130435944,
"learning_rate": 0.0001553030303030303,
"loss": 1.262,
"step": 159
},
{
"epoch": 0.438957475994513,
"grad_norm": 0.16517716646194458,
"learning_rate": 0.00015454545454545454,
"loss": 1.2895,
"step": 160
},
{
"epoch": 0.44170096021947874,
"grad_norm": 0.16663017868995667,
"learning_rate": 0.0001537878787878788,
"loss": 1.2847,
"step": 161
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.17813093960285187,
"learning_rate": 0.00015303030303030304,
"loss": 1.1901,
"step": 162
},
{
"epoch": 0.44718792866941015,
"grad_norm": 0.2013212889432907,
"learning_rate": 0.00015227272727272727,
"loss": 1.2106,
"step": 163
},
{
"epoch": 0.4499314128943759,
"grad_norm": 0.16856490075588226,
"learning_rate": 0.00015151515151515152,
"loss": 1.1806,
"step": 164
},
{
"epoch": 0.45267489711934156,
"grad_norm": 0.18135228753089905,
"learning_rate": 0.00015075757575757576,
"loss": 1.2848,
"step": 165
},
{
"epoch": 0.4554183813443073,
"grad_norm": 0.18058203160762787,
"learning_rate": 0.00015000000000000001,
"loss": 1.2576,
"step": 166
},
{
"epoch": 0.45816186556927296,
"grad_norm": 0.14680561423301697,
"learning_rate": 0.00014924242424242426,
"loss": 1.2322,
"step": 167
},
{
"epoch": 0.4609053497942387,
"grad_norm": 0.1470465362071991,
"learning_rate": 0.00014848484848484849,
"loss": 1.241,
"step": 168
},
{
"epoch": 0.46364883401920437,
"grad_norm": 0.14876408874988556,
"learning_rate": 0.00014772727272727274,
"loss": 1.2268,
"step": 169
},
{
"epoch": 0.4663923182441701,
"grad_norm": 0.14918828010559082,
"learning_rate": 0.00014696969696969698,
"loss": 1.3067,
"step": 170
},
{
"epoch": 0.4691358024691358,
"grad_norm": 0.14815697073936462,
"learning_rate": 0.00014621212121212123,
"loss": 1.2492,
"step": 171
},
{
"epoch": 0.4718792866941015,
"grad_norm": 0.13748030364513397,
"learning_rate": 0.00014545454545454546,
"loss": 1.2281,
"step": 172
},
{
"epoch": 0.47462277091906724,
"grad_norm": 0.14712287485599518,
"learning_rate": 0.0001446969696969697,
"loss": 1.1438,
"step": 173
},
{
"epoch": 0.4773662551440329,
"grad_norm": 0.13734129071235657,
"learning_rate": 0.00014393939393939396,
"loss": 1.2528,
"step": 174
},
{
"epoch": 0.48010973936899864,
"grad_norm": 0.15113233029842377,
"learning_rate": 0.0001431818181818182,
"loss": 1.2387,
"step": 175
},
{
"epoch": 0.4828532235939643,
"grad_norm": 0.14522749185562134,
"learning_rate": 0.00014242424242424243,
"loss": 1.2214,
"step": 176
},
{
"epoch": 0.48559670781893005,
"grad_norm": 0.14445781707763672,
"learning_rate": 0.00014166666666666668,
"loss": 1.2424,
"step": 177
},
{
"epoch": 0.4883401920438957,
"grad_norm": 0.15134842693805695,
"learning_rate": 0.00014090909090909093,
"loss": 1.2522,
"step": 178
},
{
"epoch": 0.49108367626886146,
"grad_norm": 0.15704961121082306,
"learning_rate": 0.00014015151515151518,
"loss": 1.2584,
"step": 179
},
{
"epoch": 0.49382716049382713,
"grad_norm": 0.153547003865242,
"learning_rate": 0.0001393939393939394,
"loss": 1.1958,
"step": 180
},
{
"epoch": 0.49657064471879286,
"grad_norm": 0.16817179322242737,
"learning_rate": 0.00013863636363636365,
"loss": 1.1739,
"step": 181
},
{
"epoch": 0.4993141289437586,
"grad_norm": 0.14305363595485687,
"learning_rate": 0.0001378787878787879,
"loss": 1.1919,
"step": 182
},
{
"epoch": 0.5020576131687243,
"grad_norm": 0.1474563330411911,
"learning_rate": 0.00013712121212121212,
"loss": 1.2232,
"step": 183
},
{
"epoch": 0.50480109739369,
"grad_norm": 0.1495485007762909,
"learning_rate": 0.00013636363636363637,
"loss": 1.2423,
"step": 184
},
{
"epoch": 0.5075445816186557,
"grad_norm": 0.1455039083957672,
"learning_rate": 0.0001356060606060606,
"loss": 1.2178,
"step": 185
},
{
"epoch": 0.5102880658436214,
"grad_norm": 0.1525765359401703,
"learning_rate": 0.00013484848484848484,
"loss": 1.2798,
"step": 186
},
{
"epoch": 0.5130315500685871,
"grad_norm": 0.15652745962142944,
"learning_rate": 0.0001340909090909091,
"loss": 1.1691,
"step": 187
},
{
"epoch": 0.5157750342935528,
"grad_norm": 0.1435556709766388,
"learning_rate": 0.00013333333333333334,
"loss": 1.2331,
"step": 188
},
{
"epoch": 0.5185185185185185,
"grad_norm": 0.14547987282276154,
"learning_rate": 0.00013257575757575756,
"loss": 1.2087,
"step": 189
},
{
"epoch": 0.5212620027434842,
"grad_norm": 0.15346300601959229,
"learning_rate": 0.0001318181818181818,
"loss": 1.2744,
"step": 190
},
{
"epoch": 0.52400548696845,
"grad_norm": 0.155302494764328,
"learning_rate": 0.00013106060606060606,
"loss": 1.2769,
"step": 191
},
{
"epoch": 0.5267489711934157,
"grad_norm": 0.1592315286397934,
"learning_rate": 0.0001303030303030303,
"loss": 1.1989,
"step": 192
},
{
"epoch": 0.5294924554183813,
"grad_norm": 0.1477937400341034,
"learning_rate": 0.00012954545454545456,
"loss": 1.195,
"step": 193
},
{
"epoch": 0.532235939643347,
"grad_norm": 0.1779361218214035,
"learning_rate": 0.00012878787878787878,
"loss": 1.1984,
"step": 194
},
{
"epoch": 0.5349794238683128,
"grad_norm": 0.16755440831184387,
"learning_rate": 0.00012803030303030303,
"loss": 1.1867,
"step": 195
},
{
"epoch": 0.5377229080932785,
"grad_norm": 0.14698271453380585,
"learning_rate": 0.00012727272727272728,
"loss": 1.2407,
"step": 196
},
{
"epoch": 0.5404663923182441,
"grad_norm": 0.22619858384132385,
"learning_rate": 0.00012651515151515153,
"loss": 1.2623,
"step": 197
},
{
"epoch": 0.5432098765432098,
"grad_norm": 0.15332822501659393,
"learning_rate": 0.00012575757575757575,
"loss": 1.2113,
"step": 198
},
{
"epoch": 0.5459533607681756,
"grad_norm": 0.15453462302684784,
"learning_rate": 0.000125,
"loss": 1.157,
"step": 199
},
{
"epoch": 0.5486968449931413,
"grad_norm": 0.18716047704219818,
"learning_rate": 0.00012424242424242425,
"loss": 1.1896,
"step": 200
},
{
"epoch": 0.551440329218107,
"grad_norm": 0.1628562957048416,
"learning_rate": 0.0001234848484848485,
"loss": 1.2035,
"step": 201
},
{
"epoch": 0.5541838134430727,
"grad_norm": 0.15352866053581238,
"learning_rate": 0.00012272727272727272,
"loss": 1.2529,
"step": 202
},
{
"epoch": 0.5569272976680384,
"grad_norm": 0.1607903093099594,
"learning_rate": 0.00012196969696969697,
"loss": 1.224,
"step": 203
},
{
"epoch": 0.5596707818930041,
"grad_norm": 0.19524335861206055,
"learning_rate": 0.00012121212121212122,
"loss": 1.0918,
"step": 204
},
{
"epoch": 0.5624142661179699,
"grad_norm": 0.17055654525756836,
"learning_rate": 0.00012045454545454546,
"loss": 1.2339,
"step": 205
},
{
"epoch": 0.5651577503429356,
"grad_norm": 0.1699044108390808,
"learning_rate": 0.00011969696969696971,
"loss": 1.1918,
"step": 206
},
{
"epoch": 0.5679012345679012,
"grad_norm": 0.20004670321941376,
"learning_rate": 0.00011893939393939394,
"loss": 1.2308,
"step": 207
},
{
"epoch": 0.5706447187928669,
"grad_norm": 0.20924247801303864,
"learning_rate": 0.0001181818181818182,
"loss": 1.2527,
"step": 208
},
{
"epoch": 0.5733882030178327,
"grad_norm": 0.16315655410289764,
"learning_rate": 0.00011742424242424244,
"loss": 1.1913,
"step": 209
},
{
"epoch": 0.5761316872427984,
"grad_norm": 0.224049374461174,
"learning_rate": 0.00011666666666666668,
"loss": 1.2381,
"step": 210
},
{
"epoch": 0.578875171467764,
"grad_norm": 0.24203604459762573,
"learning_rate": 0.00011590909090909093,
"loss": 1.1433,
"step": 211
},
{
"epoch": 0.5816186556927297,
"grad_norm": 0.18014276027679443,
"learning_rate": 0.00011515151515151516,
"loss": 1.2695,
"step": 212
},
{
"epoch": 0.5843621399176955,
"grad_norm": 0.22670245170593262,
"learning_rate": 0.00011439393939393941,
"loss": 1.1642,
"step": 213
},
{
"epoch": 0.5871056241426612,
"grad_norm": 0.250273197889328,
"learning_rate": 0.00011363636363636365,
"loss": 1.2256,
"step": 214
},
{
"epoch": 0.5898491083676269,
"grad_norm": 0.18230263888835907,
"learning_rate": 0.0001128787878787879,
"loss": 1.1579,
"step": 215
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.17414681613445282,
"learning_rate": 0.00011212121212121212,
"loss": 1.185,
"step": 216
},
{
"epoch": 0.5953360768175583,
"grad_norm": 0.25959229469299316,
"learning_rate": 0.00011136363636363636,
"loss": 1.2697,
"step": 217
},
{
"epoch": 0.598079561042524,
"grad_norm": 0.21464407444000244,
"learning_rate": 0.00011060606060606061,
"loss": 1.2269,
"step": 218
},
{
"epoch": 0.6008230452674898,
"grad_norm": 0.1585656851530075,
"learning_rate": 0.00010984848484848484,
"loss": 1.116,
"step": 219
},
{
"epoch": 0.6035665294924554,
"grad_norm": 0.28340986371040344,
"learning_rate": 0.00010909090909090909,
"loss": 1.2534,
"step": 220
},
{
"epoch": 0.6063100137174211,
"grad_norm": 0.2463023066520691,
"learning_rate": 0.00010833333333333333,
"loss": 1.2119,
"step": 221
},
{
"epoch": 0.6090534979423868,
"grad_norm": 0.1642024666070938,
"learning_rate": 0.00010757575757575758,
"loss": 1.1755,
"step": 222
},
{
"epoch": 0.6117969821673526,
"grad_norm": 0.2119123786687851,
"learning_rate": 0.00010681818181818181,
"loss": 1.1784,
"step": 223
},
{
"epoch": 0.6145404663923183,
"grad_norm": 0.20988301932811737,
"learning_rate": 0.00010606060606060606,
"loss": 1.1841,
"step": 224
},
{
"epoch": 0.6172839506172839,
"grad_norm": 0.1982959508895874,
"learning_rate": 0.0001053030303030303,
"loss": 1.2508,
"step": 225
},
{
"epoch": 0.6200274348422496,
"grad_norm": 0.1641291379928589,
"learning_rate": 0.00010454545454545455,
"loss": 1.1552,
"step": 226
},
{
"epoch": 0.6227709190672154,
"grad_norm": 0.2212403416633606,
"learning_rate": 0.00010378787878787878,
"loss": 1.2479,
"step": 227
},
{
"epoch": 0.6255144032921811,
"grad_norm": 0.2342177778482437,
"learning_rate": 0.00010303030303030303,
"loss": 1.2185,
"step": 228
},
{
"epoch": 0.6282578875171467,
"grad_norm": 0.15451593697071075,
"learning_rate": 0.00010227272727272727,
"loss": 1.2433,
"step": 229
},
{
"epoch": 0.6310013717421125,
"grad_norm": 0.16344501078128815,
"learning_rate": 0.00010151515151515152,
"loss": 1.1227,
"step": 230
},
{
"epoch": 0.6337448559670782,
"grad_norm": 0.15936411917209625,
"learning_rate": 0.00010075757575757576,
"loss": 1.1841,
"step": 231
},
{
"epoch": 0.6364883401920439,
"grad_norm": 0.2086990624666214,
"learning_rate": 0.0001,
"loss": 1.2414,
"step": 232
},
{
"epoch": 0.6392318244170097,
"grad_norm": 0.18307948112487793,
"learning_rate": 9.924242424242425e-05,
"loss": 1.1255,
"step": 233
},
{
"epoch": 0.6419753086419753,
"grad_norm": 0.1552293747663498,
"learning_rate": 9.848484848484849e-05,
"loss": 1.1812,
"step": 234
},
{
"epoch": 0.644718792866941,
"grad_norm": 0.19036468863487244,
"learning_rate": 9.772727272727274e-05,
"loss": 1.1467,
"step": 235
},
{
"epoch": 0.6474622770919067,
"grad_norm": 0.15272115170955658,
"learning_rate": 9.696969696969698e-05,
"loss": 1.2155,
"step": 236
},
{
"epoch": 0.6502057613168725,
"grad_norm": 0.19770941138267517,
"learning_rate": 9.621212121212123e-05,
"loss": 1.1268,
"step": 237
},
{
"epoch": 0.6529492455418381,
"grad_norm": 0.1657804250717163,
"learning_rate": 9.545454545454546e-05,
"loss": 1.1862,
"step": 238
},
{
"epoch": 0.6556927297668038,
"grad_norm": 0.1653299182653427,
"learning_rate": 9.469696969696971e-05,
"loss": 1.2015,
"step": 239
},
{
"epoch": 0.6584362139917695,
"grad_norm": 0.17784593999385834,
"learning_rate": 9.393939393939395e-05,
"loss": 1.2368,
"step": 240
},
{
"epoch": 0.6611796982167353,
"grad_norm": 0.15533973276615143,
"learning_rate": 9.318181818181818e-05,
"loss": 1.2155,
"step": 241
},
{
"epoch": 0.663923182441701,
"grad_norm": 0.16223277151584625,
"learning_rate": 9.242424242424242e-05,
"loss": 1.2732,
"step": 242
}
],
"logging_steps": 1,
"max_steps": 364,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.995266789026488e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}