{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4624277456647399,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004624277456647399,
"eval_loss": 4.693221569061279,
"eval_runtime": 23.3821,
"eval_samples_per_second": 15.61,
"eval_steps_per_second": 1.967,
"step": 1
},
{
"epoch": 0.013872832369942197,
"grad_norm": 6.138703346252441,
"learning_rate": 1.5e-05,
"loss": 18.4969,
"step": 3
},
{
"epoch": 0.027745664739884393,
"grad_norm": 6.098344802856445,
"learning_rate": 3e-05,
"loss": 18.5023,
"step": 6
},
{
"epoch": 0.04161849710982659,
"grad_norm": 8.08814525604248,
"learning_rate": 4.5e-05,
"loss": 18.2983,
"step": 9
},
{
"epoch": 0.04161849710982659,
"eval_loss": 4.422519207000732,
"eval_runtime": 23.2409,
"eval_samples_per_second": 15.705,
"eval_steps_per_second": 1.979,
"step": 9
},
{
"epoch": 0.055491329479768786,
"grad_norm": 13.294960975646973,
"learning_rate": 4.993910125649561e-05,
"loss": 17.6258,
"step": 12
},
{
"epoch": 0.06936416184971098,
"grad_norm": 12.818150520324707,
"learning_rate": 4.962019382530521e-05,
"loss": 13.5799,
"step": 15
},
{
"epoch": 0.08323699421965318,
"grad_norm": 11.56785774230957,
"learning_rate": 4.9031542398457974e-05,
"loss": 11.4697,
"step": 18
},
{
"epoch": 0.08323699421965318,
"eval_loss": 2.661552906036377,
"eval_runtime": 23.3554,
"eval_samples_per_second": 15.628,
"eval_steps_per_second": 1.97,
"step": 18
},
{
"epoch": 0.09710982658959537,
"grad_norm": 11.117833137512207,
"learning_rate": 4.817959636416969e-05,
"loss": 9.6316,
"step": 21
},
{
"epoch": 0.11098265895953757,
"grad_norm": 14.03640079498291,
"learning_rate": 4.707368982147318e-05,
"loss": 10.2925,
"step": 24
},
{
"epoch": 0.12485549132947976,
"grad_norm": 12.930510520935059,
"learning_rate": 4.572593931387604e-05,
"loss": 9.8234,
"step": 27
},
{
"epoch": 0.12485549132947976,
"eval_loss": 2.4066836833953857,
"eval_runtime": 23.3713,
"eval_samples_per_second": 15.617,
"eval_steps_per_second": 1.968,
"step": 27
},
{
"epoch": 0.13872832369942195,
"grad_norm": 13.057132720947266,
"learning_rate": 4.415111107797445e-05,
"loss": 8.9095,
"step": 30
},
{
"epoch": 0.15260115606936417,
"grad_norm": 13.501303672790527,
"learning_rate": 4.2366459261474933e-05,
"loss": 9.0641,
"step": 33
},
{
"epoch": 0.16647398843930636,
"grad_norm": 13.990789413452148,
"learning_rate": 4.039153688314145e-05,
"loss": 8.5,
"step": 36
},
{
"epoch": 0.16647398843930636,
"eval_loss": 2.281359910964966,
"eval_runtime": 23.3762,
"eval_samples_per_second": 15.614,
"eval_steps_per_second": 1.968,
"step": 36
},
{
"epoch": 0.18034682080924855,
"grad_norm": 15.291366577148438,
"learning_rate": 3.824798160583012e-05,
"loss": 9.3509,
"step": 39
},
{
"epoch": 0.19421965317919074,
"grad_norm": 12.936572074890137,
"learning_rate": 3.5959278669726935e-05,
"loss": 9.5222,
"step": 42
},
{
"epoch": 0.20809248554913296,
"grad_norm": 12.182962417602539,
"learning_rate": 3.355050358314172e-05,
"loss": 9.2995,
"step": 45
},
{
"epoch": 0.20809248554913296,
"eval_loss": 2.2252280712127686,
"eval_runtime": 23.3704,
"eval_samples_per_second": 15.618,
"eval_steps_per_second": 1.968,
"step": 45
},
{
"epoch": 0.22196531791907514,
"grad_norm": 13.763567924499512,
"learning_rate": 3.104804738999169e-05,
"loss": 8.9847,
"step": 48
},
{
"epoch": 0.23583815028901733,
"grad_norm": 10.402312278747559,
"learning_rate": 2.8479327524001636e-05,
"loss": 8.2846,
"step": 51
},
{
"epoch": 0.24971098265895952,
"grad_norm": 12.686690330505371,
"learning_rate": 2.587248741756253e-05,
"loss": 9.2072,
"step": 54
},
{
"epoch": 0.24971098265895952,
"eval_loss": 2.1948208808898926,
"eval_runtime": 23.3918,
"eval_samples_per_second": 15.604,
"eval_steps_per_second": 1.967,
"step": 54
},
{
"epoch": 0.2635838150289017,
"grad_norm": 11.070029258728027,
"learning_rate": 2.3256088156396868e-05,
"loss": 8.5202,
"step": 57
},
{
"epoch": 0.2774566473988439,
"grad_norm": 14.184294700622559,
"learning_rate": 2.0658795558326743e-05,
"loss": 9.3465,
"step": 60
},
{
"epoch": 0.29132947976878615,
"grad_norm": 11.007719993591309,
"learning_rate": 1.8109066104575023e-05,
"loss": 9.118,
"step": 63
},
{
"epoch": 0.29132947976878615,
"eval_loss": 2.174855947494507,
"eval_runtime": 23.3884,
"eval_samples_per_second": 15.606,
"eval_steps_per_second": 1.967,
"step": 63
},
{
"epoch": 0.30520231213872834,
"grad_norm": 13.206997871398926,
"learning_rate": 1.56348351646022e-05,
"loss": 8.8766,
"step": 66
},
{
"epoch": 0.3190751445086705,
"grad_norm": 10.500737190246582,
"learning_rate": 1.3263210930352737e-05,
"loss": 7.9143,
"step": 69
},
{
"epoch": 0.3329479768786127,
"grad_norm": 11.877433776855469,
"learning_rate": 1.1020177413231334e-05,
"loss": 8.8372,
"step": 72
},
{
"epoch": 0.3329479768786127,
"eval_loss": 2.1602916717529297,
"eval_runtime": 23.3824,
"eval_samples_per_second": 15.61,
"eval_steps_per_second": 1.967,
"step": 72
},
{
"epoch": 0.3468208092485549,
"grad_norm": 10.657315254211426,
"learning_rate": 8.930309757836517e-06,
"loss": 8.5161,
"step": 75
},
{
"epoch": 0.3606936416184971,
"grad_norm": 11.294054985046387,
"learning_rate": 7.016504991533726e-06,
"loss": 8.1042,
"step": 78
},
{
"epoch": 0.3745664739884393,
"grad_norm": 12.412280082702637,
"learning_rate": 5.299731159831953e-06,
"loss": 8.4474,
"step": 81
},
{
"epoch": 0.3745664739884393,
"eval_loss": 2.152019739151001,
"eval_runtime": 23.3691,
"eval_samples_per_second": 15.619,
"eval_steps_per_second": 1.968,
"step": 81
},
{
"epoch": 0.3884393063583815,
"grad_norm": 11.942791938781738,
"learning_rate": 3.798797596089351e-06,
"loss": 9.0616,
"step": 84
},
{
"epoch": 0.4023121387283237,
"grad_norm": 11.127038955688477,
"learning_rate": 2.5301488425208296e-06,
"loss": 8.395,
"step": 87
},
{
"epoch": 0.4161849710982659,
"grad_norm": 13.24832534790039,
"learning_rate": 1.5076844803522922e-06,
"loss": 9.1845,
"step": 90
},
{
"epoch": 0.4161849710982659,
"eval_loss": 2.148911237716675,
"eval_runtime": 23.3723,
"eval_samples_per_second": 15.617,
"eval_steps_per_second": 1.968,
"step": 90
},
{
"epoch": 0.4300578034682081,
"grad_norm": 11.400464057922363,
"learning_rate": 7.426068431000882e-07,
"loss": 8.8267,
"step": 93
},
{
"epoch": 0.4439306358381503,
"grad_norm": 13.761180877685547,
"learning_rate": 2.4329828146074095e-07,
"loss": 8.0885,
"step": 96
},
{
"epoch": 0.4578034682080925,
"grad_norm": 11.87588882446289,
"learning_rate": 1.522932452260595e-08,
"loss": 9.3402,
"step": 99
},
{
"epoch": 0.4578034682080925,
"eval_loss": 2.1476869583129883,
"eval_runtime": 23.3804,
"eval_samples_per_second": 15.611,
"eval_steps_per_second": 1.967,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.309190765150208e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}