{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07383773928896992,
"eval_steps": 9,
"global_step": 81,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009115770282588879,
"eval_loss": 2.555964469909668,
"eval_runtime": 406.622,
"eval_samples_per_second": 4.545,
"eval_steps_per_second": 0.568,
"step": 1
},
{
"epoch": 0.0027347310847766638,
"grad_norm": 7.979882717132568,
"learning_rate": 3e-05,
"loss": 10.1658,
"step": 3
},
{
"epoch": 0.0054694621695533276,
"grad_norm": 6.485815048217773,
"learning_rate": 6e-05,
"loss": 9.9619,
"step": 6
},
{
"epoch": 0.008204193254329991,
"grad_norm": 26.487564086914062,
"learning_rate": 9e-05,
"loss": 9.7054,
"step": 9
},
{
"epoch": 0.008204193254329991,
"eval_loss": 2.3843369483947754,
"eval_runtime": 408.562,
"eval_samples_per_second": 4.523,
"eval_steps_per_second": 0.565,
"step": 9
},
{
"epoch": 0.010938924339106655,
"grad_norm": 4.259462833404541,
"learning_rate": 9.987820251299122e-05,
"loss": 9.506,
"step": 12
},
{
"epoch": 0.013673655423883319,
"grad_norm": 4.685791492462158,
"learning_rate": 9.924038765061042e-05,
"loss": 9.2787,
"step": 15
},
{
"epoch": 0.016408386508659983,
"grad_norm": 6.113475799560547,
"learning_rate": 9.806308479691595e-05,
"loss": 8.9251,
"step": 18
},
{
"epoch": 0.016408386508659983,
"eval_loss": 2.118788003921509,
"eval_runtime": 408.4849,
"eval_samples_per_second": 4.524,
"eval_steps_per_second": 0.566,
"step": 18
},
{
"epoch": 0.019143117593436645,
"grad_norm": 19.60219955444336,
"learning_rate": 9.635919272833938e-05,
"loss": 8.2305,
"step": 21
},
{
"epoch": 0.02187784867821331,
"grad_norm": 23.08515167236328,
"learning_rate": 9.414737964294636e-05,
"loss": 7.2251,
"step": 24
},
{
"epoch": 0.024612579762989972,
"grad_norm": 15.243215560913086,
"learning_rate": 9.145187862775209e-05,
"loss": 6.5719,
"step": 27
},
{
"epoch": 0.024612579762989972,
"eval_loss": 1.5913326740264893,
"eval_runtime": 408.4775,
"eval_samples_per_second": 4.524,
"eval_steps_per_second": 0.566,
"step": 27
},
{
"epoch": 0.027347310847766638,
"grad_norm": 10.839764595031738,
"learning_rate": 8.83022221559489e-05,
"loss": 5.6314,
"step": 30
},
{
"epoch": 0.0300820419325433,
"grad_norm": 9.605040550231934,
"learning_rate": 8.473291852294987e-05,
"loss": 5.397,
"step": 33
},
{
"epoch": 0.032816773017319965,
"grad_norm": 12.7434720993042,
"learning_rate": 8.07830737662829e-05,
"loss": 4.4798,
"step": 36
},
{
"epoch": 0.032816773017319965,
"eval_loss": 1.2467855215072632,
"eval_runtime": 408.5454,
"eval_samples_per_second": 4.523,
"eval_steps_per_second": 0.565,
"step": 36
},
{
"epoch": 0.03555150410209663,
"grad_norm": 10.537009239196777,
"learning_rate": 7.649596321166024e-05,
"loss": 5.077,
"step": 39
},
{
"epoch": 0.03828623518687329,
"grad_norm": 9.679017066955566,
"learning_rate": 7.191855733945387e-05,
"loss": 5.2566,
"step": 42
},
{
"epoch": 0.04102096627164995,
"grad_norm": 15.049214363098145,
"learning_rate": 6.710100716628344e-05,
"loss": 4.4715,
"step": 45
},
{
"epoch": 0.04102096627164995,
"eval_loss": 1.105316162109375,
"eval_runtime": 408.5369,
"eval_samples_per_second": 4.523,
"eval_steps_per_second": 0.565,
"step": 45
},
{
"epoch": 0.04375569735642662,
"grad_norm": 10.599452018737793,
"learning_rate": 6.209609477998338e-05,
"loss": 4.073,
"step": 48
},
{
"epoch": 0.04649042844120328,
"grad_norm": 8.252169609069824,
"learning_rate": 5.695865504800327e-05,
"loss": 4.3738,
"step": 51
},
{
"epoch": 0.049225159525979945,
"grad_norm": 10.953838348388672,
"learning_rate": 5.174497483512506e-05,
"loss": 3.5531,
"step": 54
},
{
"epoch": 0.049225159525979945,
"eval_loss": 0.9935899972915649,
"eval_runtime": 408.5367,
"eval_samples_per_second": 4.523,
"eval_steps_per_second": 0.565,
"step": 54
},
{
"epoch": 0.05195989061075661,
"grad_norm": 9.884892463684082,
"learning_rate": 4.6512176312793736e-05,
"loss": 3.4851,
"step": 57
},
{
"epoch": 0.054694621695533276,
"grad_norm": 8.605599403381348,
"learning_rate": 4.131759111665349e-05,
"loss": 4.5235,
"step": 60
},
{
"epoch": 0.05742935278030994,
"grad_norm": 8.588393211364746,
"learning_rate": 3.6218132209150045e-05,
"loss": 3.8852,
"step": 63
},
{
"epoch": 0.05742935278030994,
"eval_loss": 0.9350164532661438,
"eval_runtime": 408.6463,
"eval_samples_per_second": 4.522,
"eval_steps_per_second": 0.565,
"step": 63
},
{
"epoch": 0.0601640838650866,
"grad_norm": 9.282758712768555,
"learning_rate": 3.12696703292044e-05,
"loss": 3.6969,
"step": 66
},
{
"epoch": 0.06289881494986327,
"grad_norm": 9.940718650817871,
"learning_rate": 2.6526421860705473e-05,
"loss": 3.4917,
"step": 69
},
{
"epoch": 0.06563354603463993,
"grad_norm": 10.431665420532227,
"learning_rate": 2.2040354826462668e-05,
"loss": 3.7412,
"step": 72
},
{
"epoch": 0.06563354603463993,
"eval_loss": 0.8883235454559326,
"eval_runtime": 408.7916,
"eval_samples_per_second": 4.521,
"eval_steps_per_second": 0.565,
"step": 72
},
{
"epoch": 0.06836827711941659,
"grad_norm": 9.749879837036133,
"learning_rate": 1.7860619515673033e-05,
"loss": 3.1491,
"step": 75
},
{
"epoch": 0.07110300820419325,
"grad_norm": 8.8101224899292,
"learning_rate": 1.4033009983067452e-05,
"loss": 2.7604,
"step": 78
},
{
"epoch": 0.07383773928896992,
"grad_norm": 10.01700210571289,
"learning_rate": 1.0599462319663905e-05,
"loss": 3.1736,
"step": 81
},
{
"epoch": 0.07383773928896992,
"eval_loss": 0.8615151047706604,
"eval_runtime": 408.7768,
"eval_samples_per_second": 4.521,
"eval_steps_per_second": 0.565,
"step": 81
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.148060618471506e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}