{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.23045267489711935,
"eval_steps": 500,
"global_step": 84,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027434842249657062,
"grad_norm": 1.902702808380127,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.0959,
"step": 1
},
{
"epoch": 0.0054869684499314125,
"grad_norm": 1.8936893939971924,
"learning_rate": 4.000000000000001e-06,
"loss": 2.099,
"step": 2
},
{
"epoch": 0.00823045267489712,
"grad_norm": 1.9235605001449585,
"learning_rate": 6e-06,
"loss": 2.1045,
"step": 3
},
{
"epoch": 0.010973936899862825,
"grad_norm": 1.7873843908309937,
"learning_rate": 8.000000000000001e-06,
"loss": 2.0144,
"step": 4
},
{
"epoch": 0.013717421124828532,
"grad_norm": 1.8321126699447632,
"learning_rate": 1e-05,
"loss": 2.0872,
"step": 5
},
{
"epoch": 0.01646090534979424,
"grad_norm": 2.0333194732666016,
"learning_rate": 1.2e-05,
"loss": 2.1146,
"step": 6
},
{
"epoch": 0.019204389574759947,
"grad_norm": 1.763102650642395,
"learning_rate": 1.4000000000000001e-05,
"loss": 2.036,
"step": 7
},
{
"epoch": 0.02194787379972565,
"grad_norm": 1.5287824869155884,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.984,
"step": 8
},
{
"epoch": 0.024691358024691357,
"grad_norm": 1.2035481929779053,
"learning_rate": 1.8e-05,
"loss": 1.928,
"step": 9
},
{
"epoch": 0.027434842249657063,
"grad_norm": 1.0602883100509644,
"learning_rate": 2e-05,
"loss": 1.9322,
"step": 10
},
{
"epoch": 0.03017832647462277,
"grad_norm": 0.9456723928451538,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.9257,
"step": 11
},
{
"epoch": 0.03292181069958848,
"grad_norm": 0.7646191120147705,
"learning_rate": 2.4e-05,
"loss": 1.8057,
"step": 12
},
{
"epoch": 0.03566529492455418,
"grad_norm": 0.6718866229057312,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.8535,
"step": 13
},
{
"epoch": 0.038408779149519894,
"grad_norm": 0.5136308073997498,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.8533,
"step": 14
},
{
"epoch": 0.0411522633744856,
"grad_norm": 0.4066430628299713,
"learning_rate": 3e-05,
"loss": 1.7867,
"step": 15
},
{
"epoch": 0.0438957475994513,
"grad_norm": 0.3591379225254059,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.8729,
"step": 16
},
{
"epoch": 0.04663923182441701,
"grad_norm": 0.3689568340778351,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.8186,
"step": 17
},
{
"epoch": 0.04938271604938271,
"grad_norm": 0.3991197645664215,
"learning_rate": 3.6e-05,
"loss": 1.8485,
"step": 18
},
{
"epoch": 0.05212620027434842,
"grad_norm": 0.3603827655315399,
"learning_rate": 3.8e-05,
"loss": 1.8081,
"step": 19
},
{
"epoch": 0.05486968449931413,
"grad_norm": 0.3805089592933655,
"learning_rate": 4e-05,
"loss": 1.7553,
"step": 20
},
{
"epoch": 0.05761316872427984,
"grad_norm": 0.42597419023513794,
"learning_rate": 4.2e-05,
"loss": 1.8115,
"step": 21
},
{
"epoch": 0.06035665294924554,
"grad_norm": 0.37801623344421387,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.7161,
"step": 22
},
{
"epoch": 0.06310013717421124,
"grad_norm": 0.35600143671035767,
"learning_rate": 4.600000000000001e-05,
"loss": 1.7315,
"step": 23
},
{
"epoch": 0.06584362139917696,
"grad_norm": 0.4111214280128479,
"learning_rate": 4.8e-05,
"loss": 1.7091,
"step": 24
},
{
"epoch": 0.06858710562414266,
"grad_norm": 0.4117395579814911,
"learning_rate": 5e-05,
"loss": 1.7026,
"step": 25
},
{
"epoch": 0.07133058984910837,
"grad_norm": 0.4069993197917938,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.6733,
"step": 26
},
{
"epoch": 0.07407407407407407,
"grad_norm": 0.4196024239063263,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.6999,
"step": 27
},
{
"epoch": 0.07681755829903979,
"grad_norm": 0.45915013551712036,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.6613,
"step": 28
},
{
"epoch": 0.07956104252400549,
"grad_norm": 0.44673749804496765,
"learning_rate": 5.8e-05,
"loss": 1.7039,
"step": 29
},
{
"epoch": 0.0823045267489712,
"grad_norm": 0.5381506085395813,
"learning_rate": 6e-05,
"loss": 1.5768,
"step": 30
},
{
"epoch": 0.0850480109739369,
"grad_norm": 0.4311385750770569,
"learning_rate": 6.2e-05,
"loss": 1.5598,
"step": 31
},
{
"epoch": 0.0877914951989026,
"grad_norm": 0.38209667801856995,
"learning_rate": 6.400000000000001e-05,
"loss": 1.511,
"step": 32
},
{
"epoch": 0.09053497942386832,
"grad_norm": 0.35374918580055237,
"learning_rate": 6.6e-05,
"loss": 1.4692,
"step": 33
},
{
"epoch": 0.09327846364883402,
"grad_norm": 0.4020269215106964,
"learning_rate": 6.800000000000001e-05,
"loss": 1.5276,
"step": 34
},
{
"epoch": 0.09602194787379972,
"grad_norm": 0.46055856347084045,
"learning_rate": 7e-05,
"loss": 1.4634,
"step": 35
},
{
"epoch": 0.09876543209876543,
"grad_norm": 0.4301123023033142,
"learning_rate": 7.2e-05,
"loss": 1.4151,
"step": 36
},
{
"epoch": 0.10150891632373114,
"grad_norm": 0.34214499592781067,
"learning_rate": 7.4e-05,
"loss": 1.4451,
"step": 37
},
{
"epoch": 0.10425240054869685,
"grad_norm": 0.24707239866256714,
"learning_rate": 7.6e-05,
"loss": 1.3434,
"step": 38
},
{
"epoch": 0.10699588477366255,
"grad_norm": 0.16371050477027893,
"learning_rate": 7.800000000000001e-05,
"loss": 1.4134,
"step": 39
},
{
"epoch": 0.10973936899862825,
"grad_norm": 0.11528003960847855,
"learning_rate": 8e-05,
"loss": 1.4896,
"step": 40
},
{
"epoch": 0.11248285322359397,
"grad_norm": 0.11622235178947449,
"learning_rate": 8.2e-05,
"loss": 1.3957,
"step": 41
},
{
"epoch": 0.11522633744855967,
"grad_norm": 0.10983709245920181,
"learning_rate": 8.4e-05,
"loss": 1.421,
"step": 42
},
{
"epoch": 0.11796982167352538,
"grad_norm": 0.10124485194683075,
"learning_rate": 8.6e-05,
"loss": 1.4127,
"step": 43
},
{
"epoch": 0.12071330589849108,
"grad_norm": 0.10291855037212372,
"learning_rate": 8.800000000000001e-05,
"loss": 1.4187,
"step": 44
},
{
"epoch": 0.12345679012345678,
"grad_norm": 0.10925430059432983,
"learning_rate": 9e-05,
"loss": 1.3476,
"step": 45
},
{
"epoch": 0.1262002743484225,
"grad_norm": 0.10825473070144653,
"learning_rate": 9.200000000000001e-05,
"loss": 1.4427,
"step": 46
},
{
"epoch": 0.1289437585733882,
"grad_norm": 0.10768264532089233,
"learning_rate": 9.4e-05,
"loss": 1.4292,
"step": 47
},
{
"epoch": 0.13168724279835392,
"grad_norm": 0.11937709152698517,
"learning_rate": 9.6e-05,
"loss": 1.3234,
"step": 48
},
{
"epoch": 0.13443072702331962,
"grad_norm": 0.11960555613040924,
"learning_rate": 9.8e-05,
"loss": 1.3566,
"step": 49
},
{
"epoch": 0.13717421124828533,
"grad_norm": 0.12491138279438019,
"learning_rate": 0.0001,
"loss": 1.4201,
"step": 50
},
{
"epoch": 0.13991769547325103,
"grad_norm": 0.11906778067350388,
"learning_rate": 0.00010200000000000001,
"loss": 1.3745,
"step": 51
},
{
"epoch": 0.14266117969821673,
"grad_norm": 0.12701214849948883,
"learning_rate": 0.00010400000000000001,
"loss": 1.3592,
"step": 52
},
{
"epoch": 0.14540466392318244,
"grad_norm": 0.14920316636562347,
"learning_rate": 0.00010600000000000002,
"loss": 1.3139,
"step": 53
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.157944455742836,
"learning_rate": 0.00010800000000000001,
"loss": 1.3704,
"step": 54
},
{
"epoch": 0.15089163237311384,
"grad_norm": 0.15193891525268555,
"learning_rate": 0.00011000000000000002,
"loss": 1.3694,
"step": 55
},
{
"epoch": 0.15363511659807957,
"grad_norm": 0.15829141438007355,
"learning_rate": 0.00011200000000000001,
"loss": 1.3351,
"step": 56
},
{
"epoch": 0.15637860082304528,
"grad_norm": 0.16939309239387512,
"learning_rate": 0.00011399999999999999,
"loss": 1.3487,
"step": 57
},
{
"epoch": 0.15912208504801098,
"grad_norm": 0.19683969020843506,
"learning_rate": 0.000116,
"loss": 1.2837,
"step": 58
},
{
"epoch": 0.16186556927297668,
"grad_norm": 0.17148783802986145,
"learning_rate": 0.000118,
"loss": 1.3372,
"step": 59
},
{
"epoch": 0.1646090534979424,
"grad_norm": 0.17560726404190063,
"learning_rate": 0.00012,
"loss": 1.3896,
"step": 60
},
{
"epoch": 0.1673525377229081,
"grad_norm": 0.17536282539367676,
"learning_rate": 0.000122,
"loss": 1.4028,
"step": 61
},
{
"epoch": 0.1700960219478738,
"grad_norm": 0.16491512954235077,
"learning_rate": 0.000124,
"loss": 1.3366,
"step": 62
},
{
"epoch": 0.1728395061728395,
"grad_norm": 0.1202845573425293,
"learning_rate": 0.000126,
"loss": 1.3201,
"step": 63
},
{
"epoch": 0.1755829903978052,
"grad_norm": 0.10146432369947433,
"learning_rate": 0.00012800000000000002,
"loss": 1.3083,
"step": 64
},
{
"epoch": 0.17832647462277093,
"grad_norm": 0.0989551916718483,
"learning_rate": 0.00013000000000000002,
"loss": 1.3216,
"step": 65
},
{
"epoch": 0.18106995884773663,
"grad_norm": 0.09368593990802765,
"learning_rate": 0.000132,
"loss": 1.3409,
"step": 66
},
{
"epoch": 0.18381344307270234,
"grad_norm": 0.09617207944393158,
"learning_rate": 0.000134,
"loss": 1.3192,
"step": 67
},
{
"epoch": 0.18655692729766804,
"grad_norm": 0.08890332281589508,
"learning_rate": 0.00013600000000000003,
"loss": 1.354,
"step": 68
},
{
"epoch": 0.18930041152263374,
"grad_norm": 0.11371646821498871,
"learning_rate": 0.000138,
"loss": 1.3201,
"step": 69
},
{
"epoch": 0.19204389574759945,
"grad_norm": 0.09785107523202896,
"learning_rate": 0.00014,
"loss": 1.2413,
"step": 70
},
{
"epoch": 0.19478737997256515,
"grad_norm": 0.09149904549121857,
"learning_rate": 0.000142,
"loss": 1.2548,
"step": 71
},
{
"epoch": 0.19753086419753085,
"grad_norm": 0.08837990462779999,
"learning_rate": 0.000144,
"loss": 1.3014,
"step": 72
},
{
"epoch": 0.20027434842249658,
"grad_norm": 0.08963413536548615,
"learning_rate": 0.000146,
"loss": 1.3128,
"step": 73
},
{
"epoch": 0.2030178326474623,
"grad_norm": 0.08815225213766098,
"learning_rate": 0.000148,
"loss": 1.3321,
"step": 74
},
{
"epoch": 0.205761316872428,
"grad_norm": 0.09394700825214386,
"learning_rate": 0.00015000000000000001,
"loss": 1.3341,
"step": 75
},
{
"epoch": 0.2085048010973937,
"grad_norm": 0.10041660070419312,
"learning_rate": 0.000152,
"loss": 1.2944,
"step": 76
},
{
"epoch": 0.2112482853223594,
"grad_norm": 0.09344102442264557,
"learning_rate": 0.000154,
"loss": 1.3226,
"step": 77
},
{
"epoch": 0.2139917695473251,
"grad_norm": 0.09259933233261108,
"learning_rate": 0.00015600000000000002,
"loss": 1.2942,
"step": 78
},
{
"epoch": 0.2167352537722908,
"grad_norm": 0.09426167607307434,
"learning_rate": 0.00015800000000000002,
"loss": 1.333,
"step": 79
},
{
"epoch": 0.2194787379972565,
"grad_norm": 0.09674811363220215,
"learning_rate": 0.00016,
"loss": 1.3242,
"step": 80
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.09802138805389404,
"learning_rate": 0.000162,
"loss": 1.3322,
"step": 81
},
{
"epoch": 0.22496570644718794,
"grad_norm": 0.09528470784425735,
"learning_rate": 0.000164,
"loss": 1.2384,
"step": 82
},
{
"epoch": 0.22770919067215364,
"grad_norm": 0.0997876301407814,
"learning_rate": 0.000166,
"loss": 1.3646,
"step": 83
},
{
"epoch": 0.23045267489711935,
"grad_norm": 0.09820306301116943,
"learning_rate": 0.000168,
"loss": 1.1982,
"step": 84
}
],
"logging_steps": 1,
"max_steps": 364,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0405766029849805e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}