basicmood / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 541.0,
"eval_steps": 500,
"global_step": 9738,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 5.56,
"grad_norm": 0.40853407979011536,
"learning_rate": 4.998998767795805e-05,
"loss": 2.5454,
"step": 100
},
{
"epoch": 11.11,
"grad_norm": 0.27508941292762756,
"learning_rate": 4.995538747800402e-05,
"loss": 0.3265,
"step": 200
},
{
"epoch": 16.67,
"grad_norm": 0.19116361439228058,
"learning_rate": 4.989610999768349e-05,
"loss": 0.1042,
"step": 300
},
{
"epoch": 22.22,
"grad_norm": 0.10925190895795822,
"learning_rate": 4.9812213853878376e-05,
"loss": 0.0633,
"step": 400
},
{
"epoch": 27.78,
"grad_norm": 0.08680253475904465,
"learning_rate": 4.970378200777949e-05,
"loss": 0.0377,
"step": 500
},
{
"epoch": 33.33,
"grad_norm": 0.029545623809099197,
"learning_rate": 4.957092168284987e-05,
"loss": 0.0181,
"step": 600
},
{
"epoch": 38.89,
"grad_norm": 0.030413279309868813,
"learning_rate": 4.941376425879624e-05,
"loss": 0.008,
"step": 700
},
{
"epoch": 44.44,
"grad_norm": 0.05130019411444664,
"learning_rate": 4.923246514165339e-05,
"loss": 0.0037,
"step": 800
},
{
"epoch": 50.0,
"grad_norm": 0.005339763592928648,
"learning_rate": 4.902720361011007e-05,
"loss": 0.0021,
"step": 900
},
{
"epoch": 55.56,
"grad_norm": 0.023293694481253624,
"learning_rate": 4.8798182638228166e-05,
"loss": 0.0014,
"step": 1000
},
{
"epoch": 61.11,
"grad_norm": 0.014095759950578213,
"learning_rate": 4.8545628694730624e-05,
"loss": 0.001,
"step": 1100
},
{
"epoch": 66.67,
"grad_norm": 0.00977824255824089,
"learning_rate": 4.826979151905655e-05,
"loss": 0.0007,
"step": 1200
},
{
"epoch": 72.22,
"grad_norm": 0.006551814265549183,
"learning_rate": 4.797094387440491e-05,
"loss": 0.0006,
"step": 1300
},
{
"epoch": 77.78,
"grad_norm": 0.008941611275076866,
"learning_rate": 4.7649381278011e-05,
"loss": 0.0005,
"step": 1400
},
{
"epoch": 83.33,
"grad_norm": 0.0047577545046806335,
"learning_rate": 4.73054217089226e-05,
"loss": 0.0004,
"step": 1500
},
{
"epoch": 88.89,
"grad_norm": 0.005813396070152521,
"learning_rate": 4.693940529356444e-05,
"loss": 0.0003,
"step": 1600
},
{
"epoch": 94.44,
"grad_norm": 0.006802932824939489,
"learning_rate": 4.655169396940229e-05,
"loss": 0.0003,
"step": 1700
},
{
"epoch": 100.0,
"grad_norm": 0.0018754027551040053,
"learning_rate": 4.6142671127038905e-05,
"loss": 0.0002,
"step": 1800
},
{
"epoch": 105.56,
"grad_norm": 0.0058812773786485195,
"learning_rate": 4.571274123109606e-05,
"loss": 0.0002,
"step": 1900
},
{
"epoch": 111.11,
"grad_norm": 0.003945046104490757,
"learning_rate": 4.52623294202573e-05,
"loss": 0.0002,
"step": 2000
},
{
"epoch": 116.67,
"grad_norm": 0.0026905699633061886,
"learning_rate": 4.479188108686714e-05,
"loss": 0.0002,
"step": 2100
},
{
"epoch": 122.22,
"grad_norm": 0.00241717416793108,
"learning_rate": 4.4301861436502156e-05,
"loss": 0.0001,
"step": 2200
},
{
"epoch": 127.78,
"grad_norm": 0.002276218729093671,
"learning_rate": 4.379275502794983e-05,
"loss": 0.0001,
"step": 2300
},
{
"epoch": 133.33,
"grad_norm": 0.0011824576649814844,
"learning_rate": 4.326506529404972e-05,
"loss": 0.0001,
"step": 2400
},
{
"epoch": 138.89,
"grad_norm": 0.001206545508466661,
"learning_rate": 4.271931404387096e-05,
"loss": 0.0001,
"step": 2500
},
{
"epoch": 144.44,
"grad_norm": 0.0025704980362206697,
"learning_rate": 4.215604094671835e-05,
"loss": 0.0001,
"step": 2600
},
{
"epoch": 150.0,
"grad_norm": 0.0012343807611614466,
"learning_rate": 4.157580299847717e-05,
"loss": 0.0001,
"step": 2700
},
{
"epoch": 155.56,
"grad_norm": 0.0010726838372647762,
"learning_rate": 4.0979173970824626e-05,
"loss": 0.0001,
"step": 2800
},
{
"epoch": 161.11,
"grad_norm": 0.0008975835517048836,
"learning_rate": 4.036674384385231e-05,
"loss": 0.0001,
"step": 2900
},
{
"epoch": 166.67,
"grad_norm": 0.0015915404073894024,
"learning_rate": 3.973911822266099e-05,
"loss": 0.0001,
"step": 3000
},
{
"epoch": 172.22,
"grad_norm": 0.0013330359943211079,
"learning_rate": 3.909691773850445e-05,
"loss": 0.0001,
"step": 3100
},
{
"epoch": 177.78,
"grad_norm": 0.0007300216238945723,
"learning_rate": 3.844077743507468e-05,
"loss": 0.0001,
"step": 3200
},
{
"epoch": 183.33,
"grad_norm": 0.0006974161951802671,
"learning_rate": 3.777134614053522e-05,
"loss": 0.0001,
"step": 3300
},
{
"epoch": 188.89,
"grad_norm": 0.0016547333216294646,
"learning_rate": 3.7089285825923615e-05,
"loss": 0.0,
"step": 3400
},
{
"epoch": 194.44,
"grad_norm": 0.0006157997995615005,
"learning_rate": 3.639527095055753e-05,
"loss": 0.0,
"step": 3500
},
{
"epoch": 200.0,
"grad_norm": 0.000979723292402923,
"learning_rate": 3.568998779509173e-05,
"loss": 0.0,
"step": 3600
},
{
"epoch": 205.56,
"grad_norm": 0.0008198455907404423,
"learning_rate": 3.497413378288541e-05,
"loss": 0.0,
"step": 3700
},
{
"epoch": 211.11,
"grad_norm": 0.0012410281924530864,
"learning_rate": 3.424841679035109e-05,
"loss": 0.0,
"step": 3800
},
{
"epoch": 216.67,
"grad_norm": 0.0007560970261693001,
"learning_rate": 3.351355444696684e-05,
"loss": 0.0,
"step": 3900
},
{
"epoch": 222.22,
"grad_norm": 0.0005174472462385893,
"learning_rate": 3.277027342564428e-05,
"loss": 0.0,
"step": 4000
},
{
"epoch": 227.78,
"grad_norm": 0.00041007634717971087,
"learning_rate": 3.201930872415374e-05,
"loss": 0.0,
"step": 4100
},
{
"epoch": 233.33,
"grad_norm": 0.0008007108117453754,
"learning_rate": 3.126140293831746e-05,
"loss": 0.0,
"step": 4200
},
{
"epoch": 238.89,
"grad_norm": 0.00044247345067560673,
"learning_rate": 3.0497305527689445e-05,
"loss": 0.0,
"step": 4300
},
{
"epoch": 244.44,
"grad_norm": 0.0001442178909201175,
"learning_rate": 2.972777207444791e-05,
"loss": 0.0,
"step": 4400
},
{
"epoch": 250.0,
"grad_norm": 0.0005981961148791015,
"learning_rate": 2.8953563536233525e-05,
"loss": 0.0,
"step": 4500
},
{
"epoch": 255.56,
"grad_norm": 0.00022320376592688262,
"learning_rate": 2.8175445493671972e-05,
"loss": 0.0,
"step": 4600
},
{
"epoch": 261.11,
"grad_norm": 0.0004137264913879335,
"learning_rate": 2.7394187393325106e-05,
"loss": 0.0,
"step": 4700
},
{
"epoch": 266.67,
"grad_norm": 0.0003305468999315053,
"learning_rate": 2.6610561786819204e-05,
"loss": 0.0,
"step": 4800
},
{
"epoch": 272.22,
"grad_norm": 0.00018206202366854995,
"learning_rate": 2.5825343566902837e-05,
"loss": 0.0,
"step": 4900
},
{
"epoch": 277.78,
"grad_norm": 0.00040663284016773105,
"learning_rate": 2.5039309201189614e-05,
"loss": 0.0,
"step": 5000
},
{
"epoch": 283.33,
"grad_norm": 0.0004440485790837556,
"learning_rate": 2.4253235964343676e-05,
"loss": 0.0,
"step": 5100
},
{
"epoch": 288.89,
"grad_norm": 0.0003763148852158338,
"learning_rate": 2.34679011694671e-05,
"loss": 0.0,
"step": 5200
},
{
"epoch": 294.44,
"grad_norm": 0.0002928711473941803,
"learning_rate": 2.2684081399449327e-05,
"loss": 0.0,
"step": 5300
},
{
"epoch": 300.0,
"grad_norm": 0.0004030682030133903,
"learning_rate": 2.1902551739038624e-05,
"loss": 0.0,
"step": 5400
},
{
"epoch": 305.56,
"grad_norm": 0.0004344022599980235,
"learning_rate": 2.1124085008395054e-05,
"loss": 0.0,
"step": 5500
},
{
"epoch": 311.11,
"grad_norm": 0.00018473710224498063,
"learning_rate": 2.03494509988827e-05,
"loss": 0.0,
"step": 5600
},
{
"epoch": 316.67,
"grad_norm": 0.00044333594269119203,
"learning_rate": 1.9579415711857018e-05,
"loss": 0.0,
"step": 5700
},
{
"epoch": 322.22,
"grad_norm": 0.00039796039345674217,
"learning_rate": 1.881474060119994e-05,
"loss": 0.0,
"step": 5800
},
{
"epoch": 327.78,
"grad_norm": 0.0002483979915268719,
"learning_rate": 1.8056181820351738e-05,
"loss": 0.0,
"step": 5900
},
{
"epoch": 333.33,
"grad_norm": 0.00021489571372512728,
"learning_rate": 1.7304489474584307e-05,
"loss": 0.0,
"step": 6000
},
{
"epoch": 338.89,
"grad_norm": 0.00023669018992222846,
"learning_rate": 1.656040687925519e-05,
"loss": 0.0,
"step": 6100
},
{
"epoch": 344.44,
"grad_norm": 0.00023518071975558996,
"learning_rate": 1.582466982477587e-05,
"loss": 0.0,
"step": 6200
},
{
"epoch": 350.0,
"grad_norm": 0.00023862645321059972,
"learning_rate": 1.509800584902108e-05,
"loss": 0.0,
"step": 6300
},
{
"epoch": 355.56,
"grad_norm": 0.00020295938884373754,
"learning_rate": 1.4381133517898804e-05,
"loss": 0.0,
"step": 6400
},
{
"epoch": 361.11,
"grad_norm": 0.0003517361474223435,
"learning_rate": 1.3674761714792153e-05,
"loss": 0.0,
"step": 6500
},
{
"epoch": 366.67,
"grad_norm": 0.00022433781123254448,
"learning_rate": 1.297958893957588e-05,
"loss": 0.0,
"step": 6600
},
{
"epoch": 372.22,
"grad_norm": 0.0002291495620738715,
"learning_rate": 1.229630261790077e-05,
"loss": 0.0,
"step": 6700
},
{
"epoch": 377.78,
"grad_norm": 0.00021542608737945557,
"learning_rate": 1.1625578421428714e-05,
"loss": 0.0,
"step": 6800
},
{
"epoch": 383.33,
"grad_norm": 0.00030446049640886486,
"learning_rate": 1.0968079599690872e-05,
"loss": 0.0,
"step": 6900
},
{
"epoch": 388.89,
"grad_norm": 0.000319374434184283,
"learning_rate": 1.0324456324229537e-05,
"loss": 0.0,
"step": 7000
},
{
"epoch": 394.44,
"grad_norm": 0.0002203370677307248,
"learning_rate": 9.695345045672166e-06,
"loss": 0.0,
"step": 7100
},
{
"epoch": 400.0,
"grad_norm": 0.00015890812210272998,
"learning_rate": 9.081367864373488e-06,
"loss": 0.0,
"step": 7200
},
{
"epoch": 405.56,
"grad_norm": 0.000279374944511801,
"learning_rate": 8.483131915247968e-06,
"loss": 0.0,
"step": 7300
},
{
"epoch": 411.11,
"grad_norm": 0.00011563805310288444,
"learning_rate": 7.901228767400859e-06,
"loss": 0.0,
"step": 7400
},
{
"epoch": 416.67,
"grad_norm": 0.00013281428255140781,
"learning_rate": 7.336233839151693e-06,
"loss": 0.0,
"step": 7500
},
{
"epoch": 422.22,
"grad_norm": 0.0002364695246797055,
"learning_rate": 6.788705829028483e-06,
"loss": 0.0,
"step": 7600
},
{
"epoch": 427.78,
"grad_norm": 0.00013619402307085693,
"learning_rate": 6.259186163295438e-06,
"loss": 0.0,
"step": 7700
},
{
"epoch": 433.33,
"grad_norm": 0.0002181720337830484,
"learning_rate": 5.748198460560475e-06,
"loss": 0.0,
"step": 7800
},
{
"epoch": 438.89,
"grad_norm": 0.0001409970282111317,
"learning_rate": 5.256248013991857e-06,
"loss": 0.0,
"step": 7900
},
{
"epoch": 444.44,
"grad_norm": 8.899461681721732e-05,
"learning_rate": 4.78382129165613e-06,
"loss": 0.0,
"step": 8000
},
{
"epoch": 450.0,
"grad_norm": 9.178288746625185e-05,
"learning_rate": 4.331385455471346e-06,
"loss": 0.0,
"step": 8100
},
{
"epoch": 455.56,
"grad_norm": 0.00016385990602429956,
"learning_rate": 3.8993878992512415e-06,
"loss": 0.0,
"step": 8200
},
{
"epoch": 461.11,
"grad_norm": 0.00012868938210885972,
"learning_rate": 3.488255806297311e-06,
"loss": 0.0,
"step": 8300
},
{
"epoch": 466.67,
"grad_norm": 0.00018513025133870542,
"learning_rate": 3.09839572697605e-06,
"loss": 0.0,
"step": 8400
},
{
"epoch": 472.22,
"grad_norm": 0.00016808818327262998,
"learning_rate": 2.7301931766992917e-06,
"loss": 0.0,
"step": 8500
},
{
"epoch": 477.78,
"grad_norm": 0.0001630824408493936,
"learning_rate": 2.384012254705048e-06,
"loss": 0.0,
"step": 8600
},
{
"epoch": 483.33,
"grad_norm": 0.00018219888443127275,
"learning_rate": 2.0601952840158366e-06,
"loss": 0.0,
"step": 8700
},
{
"epoch": 488.89,
"grad_norm": 0.00011875651398440823,
"learning_rate": 1.75906247293057e-06,
"loss": 0.0,
"step": 8800
},
{
"epoch": 494.44,
"grad_norm": 0.00015640753554180264,
"learning_rate": 1.4809115983847266e-06,
"loss": 0.0,
"step": 8900
},
{
"epoch": 500.0,
"grad_norm": 0.00010140336962649599,
"learning_rate": 1.226017711491867e-06,
"loss": 0.0,
"step": 9000
},
{
"epoch": 505.56,
"grad_norm": 0.00018589019600767642,
"learning_rate": 9.946328655577624e-07,
"loss": 0.0,
"step": 9100
},
{
"epoch": 511.11,
"grad_norm": 0.00026709603844210505,
"learning_rate": 7.869858668360042e-07,
"loss": 0.0,
"step": 9200
},
{
"epoch": 516.67,
"grad_norm": 0.00017575189121998847,
"learning_rate": 6.032820482716001e-07,
"loss": 0.0,
"step": 9300
},
{
"epoch": 522.22,
"grad_norm": 0.0001377611479256302,
"learning_rate": 4.437030664562969e-07,
"loss": 0.0,
"step": 9400
},
{
"epoch": 527.78,
"grad_norm": 9.088205842999741e-05,
"learning_rate": 3.084067219964182e-07,
"loss": 0.0,
"step": 9500
},
{
"epoch": 533.33,
"grad_norm": 0.00015840640116948634,
"learning_rate": 1.975268034707878e-07,
"loss": 0.0,
"step": 9600
},
{
"epoch": 538.89,
"grad_norm": 0.00013717034016735852,
"learning_rate": 1.1117295513313475e-07,
"loss": 0.0,
"step": 9700
}
],
"logging_steps": 100,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 556,
"save_steps": 500,
"total_flos": 2.673487599113011e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
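
The JSON above is the standard state file written by the Hugging Face Trainer: `log_history` holds one entry per `logging_steps` interval (here every 100 steps) with the global step, epoch, training loss, learning rate, and gradient norm, while the trailing fields record the run configuration (`max_steps` 10000, `train_batch_size` 4, and so on). Below is a minimal sketch of how such a file could be inspected offline, assuming it is saved locally as `trainer_state.json` and that matplotlib is installed; the path and the plotting choices are illustrative, not part of the original upload.

```python
import json

import matplotlib.pyplot as plt

# Load the exported Trainer state (local path is an assumption; adjust as needed).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Keep only entries that carry a training loss (eval entries, if any, would differ).
history = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in history]
losses = [e["loss"] for e in history]
lrs = [e["learning_rate"] for e in history]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("global step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("global step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()
```

Plotted this way, the logged loss falls from 2.55 at step 100 to effectively 0.0 by roughly step 3400 and stays there, while the learning rate decays smoothly from about 5e-5 toward zero over the 10000-step budget, consistent with a cosine-style schedule.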