{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4060356652949246,
"eval_steps": 500,
"global_step": 148,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027434842249657062,
"grad_norm": 1.902702808380127,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.0959,
"step": 1
},
{
"epoch": 0.0054869684499314125,
"grad_norm": 1.8936893939971924,
"learning_rate": 4.000000000000001e-06,
"loss": 2.099,
"step": 2
},
{
"epoch": 0.00823045267489712,
"grad_norm": 1.9235605001449585,
"learning_rate": 6e-06,
"loss": 2.1045,
"step": 3
},
{
"epoch": 0.010973936899862825,
"grad_norm": 1.7873843908309937,
"learning_rate": 8.000000000000001e-06,
"loss": 2.0144,
"step": 4
},
{
"epoch": 0.013717421124828532,
"grad_norm": 1.8321126699447632,
"learning_rate": 1e-05,
"loss": 2.0872,
"step": 5
},
{
"epoch": 0.01646090534979424,
"grad_norm": 2.0333194732666016,
"learning_rate": 1.2e-05,
"loss": 2.1146,
"step": 6
},
{
"epoch": 0.019204389574759947,
"grad_norm": 1.763102650642395,
"learning_rate": 1.4000000000000001e-05,
"loss": 2.036,
"step": 7
},
{
"epoch": 0.02194787379972565,
"grad_norm": 1.5287824869155884,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.984,
"step": 8
},
{
"epoch": 0.024691358024691357,
"grad_norm": 1.2035481929779053,
"learning_rate": 1.8e-05,
"loss": 1.928,
"step": 9
},
{
"epoch": 0.027434842249657063,
"grad_norm": 1.0602883100509644,
"learning_rate": 2e-05,
"loss": 1.9322,
"step": 10
},
{
"epoch": 0.03017832647462277,
"grad_norm": 0.9456723928451538,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.9257,
"step": 11
},
{
"epoch": 0.03292181069958848,
"grad_norm": 0.7646191120147705,
"learning_rate": 2.4e-05,
"loss": 1.8057,
"step": 12
},
{
"epoch": 0.03566529492455418,
"grad_norm": 0.6718866229057312,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.8535,
"step": 13
},
{
"epoch": 0.038408779149519894,
"grad_norm": 0.5136308073997498,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.8533,
"step": 14
},
{
"epoch": 0.0411522633744856,
"grad_norm": 0.4066430628299713,
"learning_rate": 3e-05,
"loss": 1.7867,
"step": 15
},
{
"epoch": 0.0438957475994513,
"grad_norm": 0.3591379225254059,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.8729,
"step": 16
},
{
"epoch": 0.04663923182441701,
"grad_norm": 0.3689568340778351,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.8186,
"step": 17
},
{
"epoch": 0.04938271604938271,
"grad_norm": 0.3991197645664215,
"learning_rate": 3.6e-05,
"loss": 1.8485,
"step": 18
},
{
"epoch": 0.05212620027434842,
"grad_norm": 0.3603827655315399,
"learning_rate": 3.8e-05,
"loss": 1.8081,
"step": 19
},
{
"epoch": 0.05486968449931413,
"grad_norm": 0.3805089592933655,
"learning_rate": 4e-05,
"loss": 1.7553,
"step": 20
},
{
"epoch": 0.05761316872427984,
"grad_norm": 0.42597419023513794,
"learning_rate": 4.2e-05,
"loss": 1.8115,
"step": 21
},
{
"epoch": 0.06035665294924554,
"grad_norm": 0.37801623344421387,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.7161,
"step": 22
},
{
"epoch": 0.06310013717421124,
"grad_norm": 0.35600143671035767,
"learning_rate": 4.600000000000001e-05,
"loss": 1.7315,
"step": 23
},
{
"epoch": 0.06584362139917696,
"grad_norm": 0.4111214280128479,
"learning_rate": 4.8e-05,
"loss": 1.7091,
"step": 24
},
{
"epoch": 0.06858710562414266,
"grad_norm": 0.4117395579814911,
"learning_rate": 5e-05,
"loss": 1.7026,
"step": 25
},
{
"epoch": 0.07133058984910837,
"grad_norm": 0.4069993197917938,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.6733,
"step": 26
},
{
"epoch": 0.07407407407407407,
"grad_norm": 0.4196024239063263,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.6999,
"step": 27
},
{
"epoch": 0.07681755829903979,
"grad_norm": 0.45915013551712036,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.6613,
"step": 28
},
{
"epoch": 0.07956104252400549,
"grad_norm": 0.44673749804496765,
"learning_rate": 5.8e-05,
"loss": 1.7039,
"step": 29
},
{
"epoch": 0.0823045267489712,
"grad_norm": 0.5381506085395813,
"learning_rate": 6e-05,
"loss": 1.5768,
"step": 30
},
{
"epoch": 0.0850480109739369,
"grad_norm": 0.4311385750770569,
"learning_rate": 6.2e-05,
"loss": 1.5598,
"step": 31
},
{
"epoch": 0.0877914951989026,
"grad_norm": 0.38209667801856995,
"learning_rate": 6.400000000000001e-05,
"loss": 1.511,
"step": 32
},
{
"epoch": 0.09053497942386832,
"grad_norm": 0.35374918580055237,
"learning_rate": 6.6e-05,
"loss": 1.4692,
"step": 33
},
{
"epoch": 0.09327846364883402,
"grad_norm": 0.4020269215106964,
"learning_rate": 6.800000000000001e-05,
"loss": 1.5276,
"step": 34
},
{
"epoch": 0.09602194787379972,
"grad_norm": 0.46055856347084045,
"learning_rate": 7e-05,
"loss": 1.4634,
"step": 35
},
{
"epoch": 0.09876543209876543,
"grad_norm": 0.4301123023033142,
"learning_rate": 7.2e-05,
"loss": 1.4151,
"step": 36
},
{
"epoch": 0.10150891632373114,
"grad_norm": 0.34214499592781067,
"learning_rate": 7.4e-05,
"loss": 1.4451,
"step": 37
},
{
"epoch": 0.10425240054869685,
"grad_norm": 0.24707239866256714,
"learning_rate": 7.6e-05,
"loss": 1.3434,
"step": 38
},
{
"epoch": 0.10699588477366255,
"grad_norm": 0.16371050477027893,
"learning_rate": 7.800000000000001e-05,
"loss": 1.4134,
"step": 39
},
{
"epoch": 0.10973936899862825,
"grad_norm": 0.11528003960847855,
"learning_rate": 8e-05,
"loss": 1.4896,
"step": 40
},
{
"epoch": 0.11248285322359397,
"grad_norm": 0.11622235178947449,
"learning_rate": 8.2e-05,
"loss": 1.3957,
"step": 41
},
{
"epoch": 0.11522633744855967,
"grad_norm": 0.10983709245920181,
"learning_rate": 8.4e-05,
"loss": 1.421,
"step": 42
},
{
"epoch": 0.11796982167352538,
"grad_norm": 0.10124485194683075,
"learning_rate": 8.6e-05,
"loss": 1.4127,
"step": 43
},
{
"epoch": 0.12071330589849108,
"grad_norm": 0.10291855037212372,
"learning_rate": 8.800000000000001e-05,
"loss": 1.4187,
"step": 44
},
{
"epoch": 0.12345679012345678,
"grad_norm": 0.10925430059432983,
"learning_rate": 9e-05,
"loss": 1.3476,
"step": 45
},
{
"epoch": 0.1262002743484225,
"grad_norm": 0.10825473070144653,
"learning_rate": 9.200000000000001e-05,
"loss": 1.4427,
"step": 46
},
{
"epoch": 0.1289437585733882,
"grad_norm": 0.10768264532089233,
"learning_rate": 9.4e-05,
"loss": 1.4292,
"step": 47
},
{
"epoch": 0.13168724279835392,
"grad_norm": 0.11937709152698517,
"learning_rate": 9.6e-05,
"loss": 1.3234,
"step": 48
},
{
"epoch": 0.13443072702331962,
"grad_norm": 0.11960555613040924,
"learning_rate": 9.8e-05,
"loss": 1.3566,
"step": 49
},
{
"epoch": 0.13717421124828533,
"grad_norm": 0.12491138279438019,
"learning_rate": 0.0001,
"loss": 1.4201,
"step": 50
},
{
"epoch": 0.13991769547325103,
"grad_norm": 0.11906778067350388,
"learning_rate": 0.00010200000000000001,
"loss": 1.3745,
"step": 51
},
{
"epoch": 0.14266117969821673,
"grad_norm": 0.12701214849948883,
"learning_rate": 0.00010400000000000001,
"loss": 1.3592,
"step": 52
},
{
"epoch": 0.14540466392318244,
"grad_norm": 0.14920316636562347,
"learning_rate": 0.00010600000000000002,
"loss": 1.3139,
"step": 53
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.157944455742836,
"learning_rate": 0.00010800000000000001,
"loss": 1.3704,
"step": 54
},
{
"epoch": 0.15089163237311384,
"grad_norm": 0.15193891525268555,
"learning_rate": 0.00011000000000000002,
"loss": 1.3694,
"step": 55
},
{
"epoch": 0.15363511659807957,
"grad_norm": 0.15829141438007355,
"learning_rate": 0.00011200000000000001,
"loss": 1.3351,
"step": 56
},
{
"epoch": 0.15637860082304528,
"grad_norm": 0.16939309239387512,
"learning_rate": 0.00011399999999999999,
"loss": 1.3487,
"step": 57
},
{
"epoch": 0.15912208504801098,
"grad_norm": 0.19683969020843506,
"learning_rate": 0.000116,
"loss": 1.2837,
"step": 58
},
{
"epoch": 0.16186556927297668,
"grad_norm": 0.17148783802986145,
"learning_rate": 0.000118,
"loss": 1.3372,
"step": 59
},
{
"epoch": 0.1646090534979424,
"grad_norm": 0.17560726404190063,
"learning_rate": 0.00012,
"loss": 1.3896,
"step": 60
},
{
"epoch": 0.1673525377229081,
"grad_norm": 0.17536282539367676,
"learning_rate": 0.000122,
"loss": 1.4028,
"step": 61
},
{
"epoch": 0.1700960219478738,
"grad_norm": 0.16491512954235077,
"learning_rate": 0.000124,
"loss": 1.3366,
"step": 62
},
{
"epoch": 0.1728395061728395,
"grad_norm": 0.1202845573425293,
"learning_rate": 0.000126,
"loss": 1.3201,
"step": 63
},
{
"epoch": 0.1755829903978052,
"grad_norm": 0.10146432369947433,
"learning_rate": 0.00012800000000000002,
"loss": 1.3083,
"step": 64
},
{
"epoch": 0.17832647462277093,
"grad_norm": 0.0989551916718483,
"learning_rate": 0.00013000000000000002,
"loss": 1.3216,
"step": 65
},
{
"epoch": 0.18106995884773663,
"grad_norm": 0.09368593990802765,
"learning_rate": 0.000132,
"loss": 1.3409,
"step": 66
},
{
"epoch": 0.18381344307270234,
"grad_norm": 0.09617207944393158,
"learning_rate": 0.000134,
"loss": 1.3192,
"step": 67
},
{
"epoch": 0.18655692729766804,
"grad_norm": 0.08890332281589508,
"learning_rate": 0.00013600000000000003,
"loss": 1.354,
"step": 68
},
{
"epoch": 0.18930041152263374,
"grad_norm": 0.11371646821498871,
"learning_rate": 0.000138,
"loss": 1.3201,
"step": 69
},
{
"epoch": 0.19204389574759945,
"grad_norm": 0.09785107523202896,
"learning_rate": 0.00014,
"loss": 1.2413,
"step": 70
},
{
"epoch": 0.19478737997256515,
"grad_norm": 0.09149904549121857,
"learning_rate": 0.000142,
"loss": 1.2548,
"step": 71
},
{
"epoch": 0.19753086419753085,
"grad_norm": 0.08837990462779999,
"learning_rate": 0.000144,
"loss": 1.3014,
"step": 72
},
{
"epoch": 0.20027434842249658,
"grad_norm": 0.08963413536548615,
"learning_rate": 0.000146,
"loss": 1.3128,
"step": 73
},
{
"epoch": 0.2030178326474623,
"grad_norm": 0.08815225213766098,
"learning_rate": 0.000148,
"loss": 1.3321,
"step": 74
},
{
"epoch": 0.205761316872428,
"grad_norm": 0.09394700825214386,
"learning_rate": 0.00015000000000000001,
"loss": 1.3341,
"step": 75
},
{
"epoch": 0.2085048010973937,
"grad_norm": 0.10041660070419312,
"learning_rate": 0.000152,
"loss": 1.2944,
"step": 76
},
{
"epoch": 0.2112482853223594,
"grad_norm": 0.09344102442264557,
"learning_rate": 0.000154,
"loss": 1.3226,
"step": 77
},
{
"epoch": 0.2139917695473251,
"grad_norm": 0.09259933233261108,
"learning_rate": 0.00015600000000000002,
"loss": 1.2942,
"step": 78
},
{
"epoch": 0.2167352537722908,
"grad_norm": 0.09426167607307434,
"learning_rate": 0.00015800000000000002,
"loss": 1.333,
"step": 79
},
{
"epoch": 0.2194787379972565,
"grad_norm": 0.09674811363220215,
"learning_rate": 0.00016,
"loss": 1.3242,
"step": 80
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.09802138805389404,
"learning_rate": 0.000162,
"loss": 1.3322,
"step": 81
},
{
"epoch": 0.22496570644718794,
"grad_norm": 0.09528470784425735,
"learning_rate": 0.000164,
"loss": 1.2384,
"step": 82
},
{
"epoch": 0.22770919067215364,
"grad_norm": 0.0997876301407814,
"learning_rate": 0.000166,
"loss": 1.3646,
"step": 83
},
{
"epoch": 0.23045267489711935,
"grad_norm": 0.09820306301116943,
"learning_rate": 0.000168,
"loss": 1.1982,
"step": 84
},
{
"epoch": 0.23319615912208505,
"grad_norm": 0.10137173533439636,
"learning_rate": 0.00017,
"loss": 1.2697,
"step": 85
},
{
"epoch": 0.23593964334705075,
"grad_norm": 0.09000196307897568,
"learning_rate": 0.000172,
"loss": 1.3122,
"step": 86
},
{
"epoch": 0.23868312757201646,
"grad_norm": 0.12272510677576065,
"learning_rate": 0.000174,
"loss": 1.3238,
"step": 87
},
{
"epoch": 0.24142661179698216,
"grad_norm": 0.10101604461669922,
"learning_rate": 0.00017600000000000002,
"loss": 1.3637,
"step": 88
},
{
"epoch": 0.24417009602194786,
"grad_norm": 0.10622192174196243,
"learning_rate": 0.00017800000000000002,
"loss": 1.2577,
"step": 89
},
{
"epoch": 0.24691358024691357,
"grad_norm": 0.10139253735542297,
"learning_rate": 0.00018,
"loss": 1.303,
"step": 90
},
{
"epoch": 0.2496570644718793,
"grad_norm": 0.09918926656246185,
"learning_rate": 0.000182,
"loss": 1.2377,
"step": 91
},
{
"epoch": 0.252400548696845,
"grad_norm": 0.11826573312282562,
"learning_rate": 0.00018400000000000003,
"loss": 1.3773,
"step": 92
},
{
"epoch": 0.2551440329218107,
"grad_norm": 0.10880250483751297,
"learning_rate": 0.00018600000000000002,
"loss": 1.3056,
"step": 93
},
{
"epoch": 0.2578875171467764,
"grad_norm": 0.1100454330444336,
"learning_rate": 0.000188,
"loss": 1.258,
"step": 94
},
{
"epoch": 0.2606310013717421,
"grad_norm": 0.1200387105345726,
"learning_rate": 0.00019,
"loss": 1.3362,
"step": 95
},
{
"epoch": 0.26337448559670784,
"grad_norm": 0.10868213325738907,
"learning_rate": 0.000192,
"loss": 1.3175,
"step": 96
},
{
"epoch": 0.2661179698216735,
"grad_norm": 0.11683013290166855,
"learning_rate": 0.000194,
"loss": 1.301,
"step": 97
},
{
"epoch": 0.26886145404663925,
"grad_norm": 0.11990080773830414,
"learning_rate": 0.000196,
"loss": 1.2372,
"step": 98
},
{
"epoch": 0.2716049382716049,
"grad_norm": 0.1380293071269989,
"learning_rate": 0.00019800000000000002,
"loss": 1.2915,
"step": 99
},
{
"epoch": 0.27434842249657065,
"grad_norm": 0.14780984818935394,
"learning_rate": 0.0002,
"loss": 1.2423,
"step": 100
},
{
"epoch": 0.27709190672153633,
"grad_norm": 0.11625576764345169,
"learning_rate": 0.00019924242424242426,
"loss": 1.2613,
"step": 101
},
{
"epoch": 0.27983539094650206,
"grad_norm": 0.16299262642860413,
"learning_rate": 0.0001984848484848485,
"loss": 1.2527,
"step": 102
},
{
"epoch": 0.2825788751714678,
"grad_norm": 0.12003400921821594,
"learning_rate": 0.00019772727272727273,
"loss": 1.2459,
"step": 103
},
{
"epoch": 0.28532235939643347,
"grad_norm": 0.11611975729465485,
"learning_rate": 0.00019696969696969698,
"loss": 1.2736,
"step": 104
},
{
"epoch": 0.2880658436213992,
"grad_norm": 0.12332016229629517,
"learning_rate": 0.00019621212121212123,
"loss": 1.2161,
"step": 105
},
{
"epoch": 0.2908093278463649,
"grad_norm": 0.12996245920658112,
"learning_rate": 0.00019545454545454548,
"loss": 1.2254,
"step": 106
},
{
"epoch": 0.2935528120713306,
"grad_norm": 0.11514672636985779,
"learning_rate": 0.0001946969696969697,
"loss": 1.2367,
"step": 107
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.13371898233890533,
"learning_rate": 0.00019393939393939395,
"loss": 1.2172,
"step": 108
},
{
"epoch": 0.299039780521262,
"grad_norm": 0.1204146221280098,
"learning_rate": 0.0001931818181818182,
"loss": 1.3121,
"step": 109
},
{
"epoch": 0.3017832647462277,
"grad_norm": 0.11520334333181381,
"learning_rate": 0.00019242424242424245,
"loss": 1.2051,
"step": 110
},
{
"epoch": 0.3045267489711934,
"grad_norm": 0.15397988259792328,
"learning_rate": 0.00019166666666666667,
"loss": 1.2829,
"step": 111
},
{
"epoch": 0.30727023319615915,
"grad_norm": 0.12115694582462311,
"learning_rate": 0.00019090909090909092,
"loss": 1.3109,
"step": 112
},
{
"epoch": 0.3100137174211248,
"grad_norm": 0.12799453735351562,
"learning_rate": 0.00019015151515151517,
"loss": 1.2836,
"step": 113
},
{
"epoch": 0.31275720164609055,
"grad_norm": 0.1297687143087387,
"learning_rate": 0.00018939393939393942,
"loss": 1.2813,
"step": 114
},
{
"epoch": 0.31550068587105623,
"grad_norm": 0.13718412816524506,
"learning_rate": 0.00018863636363636364,
"loss": 1.3138,
"step": 115
},
{
"epoch": 0.31824417009602196,
"grad_norm": 0.12237361073493958,
"learning_rate": 0.0001878787878787879,
"loss": 1.1938,
"step": 116
},
{
"epoch": 0.32098765432098764,
"grad_norm": 0.12502606213092804,
"learning_rate": 0.00018712121212121212,
"loss": 1.3258,
"step": 117
},
{
"epoch": 0.32373113854595337,
"grad_norm": 0.13772045075893402,
"learning_rate": 0.00018636363636363636,
"loss": 1.3305,
"step": 118
},
{
"epoch": 0.32647462277091904,
"grad_norm": 0.1165667474269867,
"learning_rate": 0.00018560606060606061,
"loss": 1.2473,
"step": 119
},
{
"epoch": 0.3292181069958848,
"grad_norm": 0.12208285182714462,
"learning_rate": 0.00018484848484848484,
"loss": 1.2755,
"step": 120
},
{
"epoch": 0.3319615912208505,
"grad_norm": 0.13750925660133362,
"learning_rate": 0.00018409090909090909,
"loss": 1.178,
"step": 121
},
{
"epoch": 0.3347050754458162,
"grad_norm": 0.12554235756397247,
"learning_rate": 0.00018333333333333334,
"loss": 1.2727,
"step": 122
},
{
"epoch": 0.3374485596707819,
"grad_norm": 0.153937429189682,
"learning_rate": 0.00018257575757575758,
"loss": 1.2232,
"step": 123
},
{
"epoch": 0.3401920438957476,
"grad_norm": 0.12629558145999908,
"learning_rate": 0.00018181818181818183,
"loss": 1.1856,
"step": 124
},
{
"epoch": 0.3429355281207133,
"grad_norm": 0.13697242736816406,
"learning_rate": 0.00018106060606060606,
"loss": 1.3235,
"step": 125
},
{
"epoch": 0.345679012345679,
"grad_norm": 0.15304319560527802,
"learning_rate": 0.0001803030303030303,
"loss": 1.2457,
"step": 126
},
{
"epoch": 0.3484224965706447,
"grad_norm": 0.1399148851633072,
"learning_rate": 0.00017954545454545456,
"loss": 1.2907,
"step": 127
},
{
"epoch": 0.3511659807956104,
"grad_norm": 0.1822618842124939,
"learning_rate": 0.0001787878787878788,
"loss": 1.3335,
"step": 128
},
{
"epoch": 0.35390946502057613,
"grad_norm": 0.16444697976112366,
"learning_rate": 0.00017803030303030303,
"loss": 1.3234,
"step": 129
},
{
"epoch": 0.35665294924554186,
"grad_norm": 0.15233512222766876,
"learning_rate": 0.00017727272727272728,
"loss": 1.2672,
"step": 130
},
{
"epoch": 0.35939643347050754,
"grad_norm": 0.16545739769935608,
"learning_rate": 0.00017651515151515153,
"loss": 1.175,
"step": 131
},
{
"epoch": 0.36213991769547327,
"grad_norm": 0.13627974689006805,
"learning_rate": 0.00017575757575757578,
"loss": 1.2611,
"step": 132
},
{
"epoch": 0.36488340192043894,
"grad_norm": 0.1535719484090805,
"learning_rate": 0.000175,
"loss": 1.2334,
"step": 133
},
{
"epoch": 0.3676268861454047,
"grad_norm": 0.1347316950559616,
"learning_rate": 0.00017424242424242425,
"loss": 1.2259,
"step": 134
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.1347058117389679,
"learning_rate": 0.0001734848484848485,
"loss": 1.135,
"step": 135
},
{
"epoch": 0.3731138545953361,
"grad_norm": 0.1388654112815857,
"learning_rate": 0.00017272727272727275,
"loss": 1.2217,
"step": 136
},
{
"epoch": 0.37585733882030176,
"grad_norm": 0.14038826525211334,
"learning_rate": 0.00017196969696969697,
"loss": 1.2837,
"step": 137
},
{
"epoch": 0.3786008230452675,
"grad_norm": 0.1578507125377655,
"learning_rate": 0.00017121212121212122,
"loss": 1.2717,
"step": 138
},
{
"epoch": 0.3813443072702332,
"grad_norm": 0.13014937937259674,
"learning_rate": 0.00017045454545454547,
"loss": 1.2604,
"step": 139
},
{
"epoch": 0.3840877914951989,
"grad_norm": 0.16863836348056793,
"learning_rate": 0.00016969696969696972,
"loss": 1.2479,
"step": 140
},
{
"epoch": 0.3868312757201646,
"grad_norm": 0.1496235728263855,
"learning_rate": 0.00016893939393939394,
"loss": 1.1911,
"step": 141
},
{
"epoch": 0.3895747599451303,
"grad_norm": 0.15035606920719147,
"learning_rate": 0.0001681818181818182,
"loss": 1.2756,
"step": 142
},
{
"epoch": 0.39231824417009603,
"grad_norm": 0.14892420172691345,
"learning_rate": 0.00016742424242424244,
"loss": 1.199,
"step": 143
},
{
"epoch": 0.3950617283950617,
"grad_norm": 0.1547059863805771,
"learning_rate": 0.0001666666666666667,
"loss": 1.2301,
"step": 144
},
{
"epoch": 0.39780521262002744,
"grad_norm": 0.13619515299797058,
"learning_rate": 0.00016590909090909094,
"loss": 1.1821,
"step": 145
},
{
"epoch": 0.40054869684499317,
"grad_norm": 0.15868829190731049,
"learning_rate": 0.00016515151515151516,
"loss": 1.2416,
"step": 146
},
{
"epoch": 0.40329218106995884,
"grad_norm": 0.13451522588729858,
"learning_rate": 0.0001643939393939394,
"loss": 1.2108,
"step": 147
},
{
"epoch": 0.4060356652949246,
"grad_norm": 0.14629404246807098,
"learning_rate": 0.00016363636363636366,
"loss": 1.2176,
"step": 148
}
],
"logging_steps": 1,
"max_steps": 364,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.834377402632356e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}