{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984,
"eval_steps": 500,
"global_step": 156,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0064,
"grad_norm": 1.0685396194458008,
"learning_rate": 3.125e-06,
"loss": 2.4322,
"step": 1
},
{
"epoch": 0.0128,
"grad_norm": 1.0006159543991089,
"learning_rate": 6.25e-06,
"loss": 2.3365,
"step": 2
},
{
"epoch": 0.0192,
"grad_norm": 1.1945818662643433,
"learning_rate": 9.375000000000001e-06,
"loss": 2.4477,
"step": 3
},
{
"epoch": 0.0256,
"grad_norm": 1.293664574623108,
"learning_rate": 1.25e-05,
"loss": 2.3515,
"step": 4
},
{
"epoch": 0.032,
"grad_norm": 1.2334715127944946,
"learning_rate": 1.5625e-05,
"loss": 2.4067,
"step": 5
},
{
"epoch": 0.0384,
"grad_norm": 1.1286251544952393,
"learning_rate": 1.8750000000000002e-05,
"loss": 2.2962,
"step": 6
},
{
"epoch": 0.0448,
"grad_norm": 1.3371106386184692,
"learning_rate": 2.1875e-05,
"loss": 2.4096,
"step": 7
},
{
"epoch": 0.0512,
"grad_norm": 1.0381008386611938,
"learning_rate": 2.5e-05,
"loss": 2.3034,
"step": 8
},
{
"epoch": 0.0576,
"grad_norm": 0.980749249458313,
"learning_rate": 2.8125000000000003e-05,
"loss": 2.3573,
"step": 9
},
{
"epoch": 0.064,
"grad_norm": 0.9865837693214417,
"learning_rate": 3.125e-05,
"loss": 2.4266,
"step": 10
},
{
"epoch": 0.0704,
"grad_norm": 0.9698574542999268,
"learning_rate": 3.4375e-05,
"loss": 2.4926,
"step": 11
},
{
"epoch": 0.0768,
"grad_norm": 1.0955685377120972,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.4153,
"step": 12
},
{
"epoch": 0.0832,
"grad_norm": 1.0469869375228882,
"learning_rate": 4.0625000000000005e-05,
"loss": 2.4177,
"step": 13
},
{
"epoch": 0.0896,
"grad_norm": 0.9750747680664062,
"learning_rate": 4.375e-05,
"loss": 2.1965,
"step": 14
},
{
"epoch": 0.096,
"grad_norm": 0.7926378846168518,
"learning_rate": 4.6875e-05,
"loss": 2.2684,
"step": 15
},
{
"epoch": 0.1024,
"grad_norm": 0.89713054895401,
"learning_rate": 5e-05,
"loss": 2.3327,
"step": 16
},
{
"epoch": 0.1088,
"grad_norm": 0.9965333342552185,
"learning_rate": 4.9993705873562665e-05,
"loss": 2.3337,
"step": 17
},
{
"epoch": 0.1152,
"grad_norm": 0.9500048160552979,
"learning_rate": 4.997482666353287e-05,
"loss": 2.4403,
"step": 18
},
{
"epoch": 0.1216,
"grad_norm": 0.9680395126342773,
"learning_rate": 4.99433718761614e-05,
"loss": 2.2885,
"step": 19
},
{
"epoch": 0.128,
"grad_norm": 0.987067699432373,
"learning_rate": 4.989935734988098e-05,
"loss": 2.3327,
"step": 20
},
{
"epoch": 0.1344,
"grad_norm": 0.9618147015571594,
"learning_rate": 4.984280524733107e-05,
"loss": 2.2491,
"step": 21
},
{
"epoch": 0.1408,
"grad_norm": 0.9065819382667542,
"learning_rate": 4.977374404419837e-05,
"loss": 2.29,
"step": 22
},
{
"epoch": 0.1472,
"grad_norm": 0.9156733155250549,
"learning_rate": 4.9692208514878444e-05,
"loss": 2.3422,
"step": 23
},
{
"epoch": 0.1536,
"grad_norm": 0.9592928290367126,
"learning_rate": 4.959823971496574e-05,
"loss": 2.3533,
"step": 24
},
{
"epoch": 0.16,
"grad_norm": 1.0047590732574463,
"learning_rate": 4.9491884960580894e-05,
"loss": 2.4621,
"step": 25
},
{
"epoch": 0.1664,
"grad_norm": 1.1006929874420166,
"learning_rate": 4.937319780454559e-05,
"loss": 2.2835,
"step": 26
},
{
"epoch": 0.1728,
"grad_norm": 0.9003715515136719,
"learning_rate": 4.9242238009417175e-05,
"loss": 2.3768,
"step": 27
},
{
"epoch": 0.1792,
"grad_norm": 0.7857057452201843,
"learning_rate": 4.909907151739633e-05,
"loss": 2.3324,
"step": 28
},
{
"epoch": 0.1856,
"grad_norm": 1.1292816400527954,
"learning_rate": 4.894377041712326e-05,
"loss": 2.2826,
"step": 29
},
{
"epoch": 0.192,
"grad_norm": 1.1270015239715576,
"learning_rate": 4.877641290737884e-05,
"loss": 2.3287,
"step": 30
},
{
"epoch": 0.1984,
"grad_norm": 0.8685899376869202,
"learning_rate": 4.8597083257709194e-05,
"loss": 2.2806,
"step": 31
},
{
"epoch": 0.2048,
"grad_norm": 0.809633195400238,
"learning_rate": 4.8405871765993433e-05,
"loss": 2.215,
"step": 32
},
{
"epoch": 0.2112,
"grad_norm": 0.8597051501274109,
"learning_rate": 4.820287471297598e-05,
"loss": 2.223,
"step": 33
},
{
"epoch": 0.2176,
"grad_norm": 0.7983012199401855,
"learning_rate": 4.7988194313786275e-05,
"loss": 2.4779,
"step": 34
},
{
"epoch": 0.224,
"grad_norm": 0.8999346494674683,
"learning_rate": 4.7761938666470403e-05,
"loss": 2.1912,
"step": 35
},
{
"epoch": 0.2304,
"grad_norm": 0.8851997256278992,
"learning_rate": 4.752422169756048e-05,
"loss": 2.3242,
"step": 36
},
{
"epoch": 0.2368,
"grad_norm": 0.9289189577102661,
"learning_rate": 4.72751631047092e-05,
"loss": 2.3494,
"step": 37
},
{
"epoch": 0.2432,
"grad_norm": 0.7859861254692078,
"learning_rate": 4.701488829641845e-05,
"loss": 2.3736,
"step": 38
},
{
"epoch": 0.2496,
"grad_norm": 1.139488697052002,
"learning_rate": 4.674352832889239e-05,
"loss": 2.2673,
"step": 39
},
{
"epoch": 0.256,
"grad_norm": 1.1300232410430908,
"learning_rate": 4.6461219840046654e-05,
"loss": 2.0377,
"step": 40
},
{
"epoch": 0.2624,
"grad_norm": 0.9157702922821045,
"learning_rate": 4.6168104980707107e-05,
"loss": 2.2182,
"step": 41
},
{
"epoch": 0.2688,
"grad_norm": 0.8134081959724426,
"learning_rate": 4.586433134303257e-05,
"loss": 2.1724,
"step": 42
},
{
"epoch": 0.2752,
"grad_norm": 0.8959707617759705,
"learning_rate": 4.5550051886197754e-05,
"loss": 2.1802,
"step": 43
},
{
"epoch": 0.2816,
"grad_norm": 0.8954533934593201,
"learning_rate": 4.522542485937369e-05,
"loss": 2.3508,
"step": 44
},
{
"epoch": 0.288,
"grad_norm": 0.9747589826583862,
"learning_rate": 4.489061372204453e-05,
"loss": 2.3869,
"step": 45
},
{
"epoch": 0.2944,
"grad_norm": 0.7722731232643127,
"learning_rate": 4.454578706170075e-05,
"loss": 2.3238,
"step": 46
},
{
"epoch": 0.3008,
"grad_norm": 1.068015694618225,
"learning_rate": 4.419111850895028e-05,
"loss": 2.0444,
"step": 47
},
{
"epoch": 0.3072,
"grad_norm": 0.8856999278068542,
"learning_rate": 4.382678665009028e-05,
"loss": 2.3237,
"step": 48
},
{
"epoch": 0.3136,
"grad_norm": 0.9963024258613586,
"learning_rate": 4.345297493718352e-05,
"loss": 1.788,
"step": 49
},
{
"epoch": 0.32,
"grad_norm": 1.3925434350967407,
"learning_rate": 4.306987159568479e-05,
"loss": 2.3318,
"step": 50
},
{
"epoch": 0.3264,
"grad_norm": 1.1278175115585327,
"learning_rate": 4.267766952966369e-05,
"loss": 2.2681,
"step": 51
},
{
"epoch": 0.3328,
"grad_norm": 0.9084079265594482,
"learning_rate": 4.227656622467162e-05,
"loss": 2.3358,
"step": 52
},
{
"epoch": 0.3392,
"grad_norm": 1.2010737657546997,
"learning_rate": 4.186676364830186e-05,
"loss": 2.2058,
"step": 53
},
{
"epoch": 0.3456,
"grad_norm": 0.9182211756706238,
"learning_rate": 4.144846814849282e-05,
"loss": 2.4409,
"step": 54
},
{
"epoch": 0.352,
"grad_norm": 1.0228757858276367,
"learning_rate": 4.10218903496256e-05,
"loss": 2.4818,
"step": 55
},
{
"epoch": 0.3584,
"grad_norm": 0.7581411004066467,
"learning_rate": 4.058724504646834e-05,
"loss": 2.1951,
"step": 56
},
{
"epoch": 0.3648,
"grad_norm": 0.8679158687591553,
"learning_rate": 4.01447510960205e-05,
"loss": 2.1719,
"step": 57
},
{
"epoch": 0.3712,
"grad_norm": 0.9111694097518921,
"learning_rate": 3.969463130731183e-05,
"loss": 2.2189,
"step": 58
},
{
"epoch": 0.3776,
"grad_norm": 0.7689900994300842,
"learning_rate": 3.92371123292113e-05,
"loss": 2.3409,
"step": 59
},
{
"epoch": 0.384,
"grad_norm": 0.8174585103988647,
"learning_rate": 3.8772424536302564e-05,
"loss": 2.2584,
"step": 60
},
{
"epoch": 0.3904,
"grad_norm": 0.8918123245239258,
"learning_rate": 3.830080191288342e-05,
"loss": 2.4437,
"step": 61
},
{
"epoch": 0.3968,
"grad_norm": 0.9666162729263306,
"learning_rate": 3.782248193514766e-05,
"loss": 2.4876,
"step": 62
},
{
"epoch": 0.4032,
"grad_norm": 0.8694432377815247,
"learning_rate": 3.7337705451608674e-05,
"loss": 2.3512,
"step": 63
},
{
"epoch": 0.4096,
"grad_norm": 0.7689424157142639,
"learning_rate": 3.6846716561824965e-05,
"loss": 2.3777,
"step": 64
},
{
"epoch": 0.416,
"grad_norm": 0.8883704543113708,
"learning_rate": 3.634976249348867e-05,
"loss": 2.3313,
"step": 65
},
{
"epoch": 0.4224,
"grad_norm": 0.7688347697257996,
"learning_rate": 3.5847093477938956e-05,
"loss": 2.3446,
"step": 66
},
{
"epoch": 0.4288,
"grad_norm": 0.8349875211715698,
"learning_rate": 3.533896262416302e-05,
"loss": 2.2939,
"step": 67
},
{
"epoch": 0.4352,
"grad_norm": 0.9108989238739014,
"learning_rate": 3.4825625791348096e-05,
"loss": 2.3362,
"step": 68
},
{
"epoch": 0.4416,
"grad_norm": 1.0305312871932983,
"learning_rate": 3.4307341460048633e-05,
"loss": 2.0807,
"step": 69
},
{
"epoch": 0.448,
"grad_norm": 1.1350359916687012,
"learning_rate": 3.378437060203357e-05,
"loss": 2.3806,
"step": 70
},
{
"epoch": 0.4544,
"grad_norm": 0.9963751435279846,
"learning_rate": 3.3256976548879184e-05,
"loss": 2.2114,
"step": 71
},
{
"epoch": 0.4608,
"grad_norm": 0.9849460124969482,
"learning_rate": 3.272542485937369e-05,
"loss": 2.5144,
"step": 72
},
{
"epoch": 0.4672,
"grad_norm": 0.9103078246116638,
"learning_rate": 3.218998318580043e-05,
"loss": 2.2375,
"step": 73
},
{
"epoch": 0.4736,
"grad_norm": 0.8857175707817078,
"learning_rate": 3.165092113916688e-05,
"loss": 2.3145,
"step": 74
},
{
"epoch": 0.48,
"grad_norm": 0.9165334105491638,
"learning_rate": 3.110851015344735e-05,
"loss": 2.3062,
"step": 75
},
{
"epoch": 0.4864,
"grad_norm": 0.8006656765937805,
"learning_rate": 3.056302334890786e-05,
"loss": 2.3114,
"step": 76
},
{
"epoch": 0.4928,
"grad_norm": 0.8809583187103271,
"learning_rate": 3.0014735394581823e-05,
"loss": 2.3675,
"step": 77
},
{
"epoch": 0.4992,
"grad_norm": 1.0938267707824707,
"learning_rate": 2.9463922369965917e-05,
"loss": 2.2234,
"step": 78
},
{
"epoch": 0.5056,
"grad_norm": 1.0633916854858398,
"learning_rate": 2.8910861626005776e-05,
"loss": 2.3126,
"step": 79
},
{
"epoch": 0.512,
"grad_norm": 1.1602728366851807,
"learning_rate": 2.8355831645441388e-05,
"loss": 2.2367,
"step": 80
},
{
"epoch": 0.5184,
"grad_norm": 0.9738953709602356,
"learning_rate": 2.7799111902582696e-05,
"loss": 2.3593,
"step": 81
},
{
"epoch": 0.5248,
"grad_norm": 1.1221870183944702,
"learning_rate": 2.724098272258584e-05,
"loss": 2.3035,
"step": 82
},
{
"epoch": 0.5312,
"grad_norm": 0.8839357495307922,
"learning_rate": 2.6681725140300997e-05,
"loss": 2.1945,
"step": 83
},
{
"epoch": 0.5376,
"grad_norm": 0.7886901497840881,
"learning_rate": 2.6121620758762877e-05,
"loss": 2.3267,
"step": 84
},
{
"epoch": 0.544,
"grad_norm": 0.7379631996154785,
"learning_rate": 2.556095160739513e-05,
"loss": 2.3424,
"step": 85
},
{
"epoch": 0.5504,
"grad_norm": 1.0881575345993042,
"learning_rate": 2.5e-05,
"loss": 2.3977,
"step": 86
},
{
"epoch": 0.5568,
"grad_norm": 0.9565750956535339,
"learning_rate": 2.443904839260488e-05,
"loss": 2.2094,
"step": 87
},
{
"epoch": 0.5632,
"grad_norm": 0.9259947538375854,
"learning_rate": 2.3878379241237136e-05,
"loss": 2.2267,
"step": 88
},
{
"epoch": 0.5696,
"grad_norm": 0.7104063630104065,
"learning_rate": 2.331827485969901e-05,
"loss": 2.3152,
"step": 89
},
{
"epoch": 0.576,
"grad_norm": 1.113071084022522,
"learning_rate": 2.2759017277414166e-05,
"loss": 2.2573,
"step": 90
},
{
"epoch": 0.5824,
"grad_norm": 0.8865794539451599,
"learning_rate": 2.2200888097417307e-05,
"loss": 2.1592,
"step": 91
},
{
"epoch": 0.5888,
"grad_norm": 1.157195806503296,
"learning_rate": 2.164416835455862e-05,
"loss": 2.3789,
"step": 92
},
{
"epoch": 0.5952,
"grad_norm": 0.9066339731216431,
"learning_rate": 2.1089138373994223e-05,
"loss": 2.3482,
"step": 93
},
{
"epoch": 0.6016,
"grad_norm": 0.8584357500076294,
"learning_rate": 2.0536077630034086e-05,
"loss": 2.327,
"step": 94
},
{
"epoch": 0.608,
"grad_norm": 0.8079529404640198,
"learning_rate": 1.9985264605418183e-05,
"loss": 2.2369,
"step": 95
},
{
"epoch": 0.6144,
"grad_norm": 0.7658668756484985,
"learning_rate": 1.9436976651092144e-05,
"loss": 2.3003,
"step": 96
},
{
"epoch": 0.6208,
"grad_norm": 0.871282696723938,
"learning_rate": 1.8891489846552646e-05,
"loss": 2.2668,
"step": 97
},
{
"epoch": 0.6272,
"grad_norm": 0.9121792912483215,
"learning_rate": 1.8349078860833123e-05,
"loss": 2.3658,
"step": 98
},
{
"epoch": 0.6336,
"grad_norm": 0.7131676077842712,
"learning_rate": 1.781001681419957e-05,
"loss": 2.2663,
"step": 99
},
{
"epoch": 0.64,
"grad_norm": 0.9556296467781067,
"learning_rate": 1.7274575140626318e-05,
"loss": 2.3178,
"step": 100
},
{
"epoch": 0.6464,
"grad_norm": 0.7415570616722107,
"learning_rate": 1.6743023451120832e-05,
"loss": 2.4113,
"step": 101
},
{
"epoch": 0.6528,
"grad_norm": 0.8964122533798218,
"learning_rate": 1.621562939796643e-05,
"loss": 2.2945,
"step": 102
},
{
"epoch": 0.6592,
"grad_norm": 0.7949494123458862,
"learning_rate": 1.5692658539951372e-05,
"loss": 2.3268,
"step": 103
},
{
"epoch": 0.6656,
"grad_norm": 0.6219509840011597,
"learning_rate": 1.5174374208651912e-05,
"loss": 2.3638,
"step": 104
},
{
"epoch": 0.672,
"grad_norm": 0.7099913358688354,
"learning_rate": 1.466103737583699e-05,
"loss": 2.1767,
"step": 105
},
{
"epoch": 0.6784,
"grad_norm": 0.7147581577301025,
"learning_rate": 1.4152906522061048e-05,
"loss": 2.2226,
"step": 106
},
{
"epoch": 0.6848,
"grad_norm": 0.741882860660553,
"learning_rate": 1.3650237506511331e-05,
"loss": 2.3083,
"step": 107
},
{
"epoch": 0.6912,
"grad_norm": 0.767869770526886,
"learning_rate": 1.3153283438175034e-05,
"loss": 2.441,
"step": 108
},
{
"epoch": 0.6976,
"grad_norm": 0.9673330187797546,
"learning_rate": 1.2662294548391328e-05,
"loss": 1.929,
"step": 109
},
{
"epoch": 0.704,
"grad_norm": 0.8619219660758972,
"learning_rate": 1.217751806485235e-05,
"loss": 2.1907,
"step": 110
},
{
"epoch": 0.7104,
"grad_norm": 0.8909844160079956,
"learning_rate": 1.1699198087116589e-05,
"loss": 2.4108,
"step": 111
},
{
"epoch": 0.7168,
"grad_norm": 0.7758563756942749,
"learning_rate": 1.122757546369744e-05,
"loss": 2.4175,
"step": 112
},
{
"epoch": 0.7232,
"grad_norm": 0.8926962614059448,
"learning_rate": 1.0762887670788702e-05,
"loss": 2.2828,
"step": 113
},
{
"epoch": 0.7296,
"grad_norm": 0.9610178470611572,
"learning_rate": 1.0305368692688174e-05,
"loss": 2.4236,
"step": 114
},
{
"epoch": 0.736,
"grad_norm": 0.9051185250282288,
"learning_rate": 9.855248903979506e-06,
"loss": 2.3165,
"step": 115
},
{
"epoch": 0.7424,
"grad_norm": 0.9481095671653748,
"learning_rate": 9.412754953531663e-06,
"loss": 2.2748,
"step": 116
},
{
"epoch": 0.7488,
"grad_norm": 0.8186295032501221,
"learning_rate": 8.978109650374397e-06,
"loss": 2.164,
"step": 117
},
{
"epoch": 0.7552,
"grad_norm": 0.7063673734664917,
"learning_rate": 8.551531851507186e-06,
"loss": 2.3275,
"step": 118
},
{
"epoch": 0.7616,
"grad_norm": 0.7460413575172424,
"learning_rate": 8.133236351698143e-06,
"loss": 2.3605,
"step": 119
},
{
"epoch": 0.768,
"grad_norm": 0.7464547753334045,
"learning_rate": 7.723433775328384e-06,
"loss": 2.3094,
"step": 120
},
{
"epoch": 0.7744,
"grad_norm": 0.7606619596481323,
"learning_rate": 7.3223304703363135e-06,
"loss": 2.3612,
"step": 121
},
{
"epoch": 0.7808,
"grad_norm": 0.9987122416496277,
"learning_rate": 6.930128404315214e-06,
"loss": 2.3813,
"step": 122
},
{
"epoch": 0.7872,
"grad_norm": 0.7884767055511475,
"learning_rate": 6.547025062816486e-06,
"loss": 2.4269,
"step": 123
},
{
"epoch": 0.7936,
"grad_norm": 1.048710584640503,
"learning_rate": 6.173213349909729e-06,
"loss": 2.3048,
"step": 124
},
{
"epoch": 0.8,
"grad_norm": 0.763475775718689,
"learning_rate": 5.808881491049723e-06,
"loss": 2.2594,
"step": 125
},
{
"epoch": 0.8064,
"grad_norm": 0.9479842185974121,
"learning_rate": 5.454212938299255e-06,
"loss": 2.1066,
"step": 126
},
{
"epoch": 0.8128,
"grad_norm": 0.7646458745002747,
"learning_rate": 5.1093862779554776e-06,
"loss": 2.1894,
"step": 127
},
{
"epoch": 0.8192,
"grad_norm": 0.8277719020843506,
"learning_rate": 4.7745751406263165e-06,
"loss": 2.229,
"step": 128
},
{
"epoch": 0.8256,
"grad_norm": 0.8212159276008606,
"learning_rate": 4.4499481138022544e-06,
"loss": 2.3768,
"step": 129
},
{
"epoch": 0.832,
"grad_norm": 0.7613511085510254,
"learning_rate": 4.135668656967434e-06,
"loss": 2.1492,
"step": 130
},
{
"epoch": 0.8384,
"grad_norm": 1.0029220581054688,
"learning_rate": 3.831895019292897e-06,
"loss": 2.4967,
"step": 131
},
{
"epoch": 0.8448,
"grad_norm": 0.8894818425178528,
"learning_rate": 3.5387801599533475e-06,
"loss": 2.1725,
"step": 132
},
{
"epoch": 0.8512,
"grad_norm": 0.7379801273345947,
"learning_rate": 3.2564716711076167e-06,
"loss": 2.4472,
"step": 133
},
{
"epoch": 0.8576,
"grad_norm": 0.93888258934021,
"learning_rate": 2.98511170358155e-06,
"loss": 2.3714,
"step": 134
},
{
"epoch": 0.864,
"grad_norm": 0.7867335677146912,
"learning_rate": 2.7248368952908053e-06,
"loss": 2.3021,
"step": 135
},
{
"epoch": 0.8704,
"grad_norm": 0.825037956237793,
"learning_rate": 2.475778302439524e-06,
"loss": 2.1939,
"step": 136
},
{
"epoch": 0.8768,
"grad_norm": 0.9306979179382324,
"learning_rate": 2.2380613335296036e-06,
"loss": 1.982,
"step": 137
},
{
"epoch": 0.8832,
"grad_norm": 0.835216224193573,
"learning_rate": 2.0118056862137357e-06,
"loss": 2.443,
"step": 138
},
{
"epoch": 0.8896,
"grad_norm": 0.8186837434768677,
"learning_rate": 1.7971252870240291e-06,
"loss": 2.3994,
"step": 139
},
{
"epoch": 0.896,
"grad_norm": 0.7756664156913757,
"learning_rate": 1.59412823400657e-06,
"loss": 2.0448,
"step": 140
},
{
"epoch": 0.9024,
"grad_norm": 0.6998193264007568,
"learning_rate": 1.4029167422908107e-06,
"loss": 2.2002,
"step": 141
},
{
"epoch": 0.9088,
"grad_norm": 1.0741509199142456,
"learning_rate": 1.2235870926211619e-06,
"loss": 2.2958,
"step": 142
},
{
"epoch": 0.9152,
"grad_norm": 0.8149780631065369,
"learning_rate": 1.0562295828767387e-06,
"loss": 2.1915,
"step": 143
},
{
"epoch": 0.9216,
"grad_norm": 0.9092973470687866,
"learning_rate": 9.009284826036691e-07,
"loss": 2.3314,
"step": 144
},
{
"epoch": 0.928,
"grad_norm": 0.6918194890022278,
"learning_rate": 7.577619905828282e-07,
"loss": 2.2965,
"step": 145
},
{
"epoch": 0.9344,
"grad_norm": 0.7561943531036377,
"learning_rate": 6.268021954544096e-07,
"loss": 2.2681,
"step": 146
},
{
"epoch": 0.9408,
"grad_norm": 0.7160936594009399,
"learning_rate": 5.08115039419113e-07,
"loss": 2.2881,
"step": 147
},
{
"epoch": 0.9472,
"grad_norm": 0.8575864434242249,
"learning_rate": 4.0176028503425835e-07,
"loss": 2.3654,
"step": 148
},
{
"epoch": 0.9536,
"grad_norm": 0.8530429005622864,
"learning_rate": 3.077914851215585e-07,
"loss": 2.2369,
"step": 149
},
{
"epoch": 0.96,
"grad_norm": 0.8026803135871887,
"learning_rate": 2.262559558016325e-07,
"loss": 2.2846,
"step": 150
},
{
"epoch": 0.9664,
"grad_norm": 0.7623191475868225,
"learning_rate": 1.571947526689349e-07,
"loss": 2.3406,
"step": 151
},
{
"epoch": 0.9728,
"grad_norm": 0.7418063282966614,
"learning_rate": 1.006426501190233e-07,
"loss": 2.269,
"step": 152
},
{
"epoch": 0.9792,
"grad_norm": 0.8512701392173767,
"learning_rate": 5.662812383859795e-08,
"loss": 2.5015,
"step": 153
},
{
"epoch": 0.9856,
"grad_norm": 0.9002600312232971,
"learning_rate": 2.5173336467135267e-08,
"loss": 2.2231,
"step": 154
},
{
"epoch": 0.992,
"grad_norm": 0.9623058438301086,
"learning_rate": 6.294126437336734e-09,
"loss": 2.3822,
"step": 155
},
{
"epoch": 0.9984,
"grad_norm": 0.7657002210617065,
"learning_rate": 0.0,
"loss": 2.1786,
"step": 156
}
],
"logging_steps": 1,
"max_steps": 156,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.5463191906952806e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}