{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.09384775808133472,
"eval_steps": 8,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0031282586027111575,
"eval_loss": 2.3120667934417725,
"eval_runtime": 14.5793,
"eval_samples_per_second": 13.855,
"eval_steps_per_second": 6.928,
"step": 1
},
{
"epoch": 0.009384775808133473,
"grad_norm": 3.425058126449585,
"learning_rate": 6e-05,
"loss": 2.2217,
"step": 3
},
{
"epoch": 0.018769551616266946,
"grad_norm": 0.7911273837089539,
"learning_rate": 0.00012,
"loss": 1.8844,
"step": 6
},
{
"epoch": 0.02502606882168926,
"eval_loss": 2.1398348808288574,
"eval_runtime": 14.6208,
"eval_samples_per_second": 13.816,
"eval_steps_per_second": 6.908,
"step": 8
},
{
"epoch": 0.028154327424400417,
"grad_norm": 0.801854133605957,
"learning_rate": 0.00018,
"loss": 2.0873,
"step": 9
},
{
"epoch": 0.03753910323253389,
"grad_norm": 0.7535266876220703,
"learning_rate": 0.00019510565162951537,
"loss": 2.1183,
"step": 12
},
{
"epoch": 0.04692387904066736,
"grad_norm": 0.9067060947418213,
"learning_rate": 0.00017071067811865476,
"loss": 2.0537,
"step": 15
},
{
"epoch": 0.05005213764337852,
"eval_loss": 2.041588306427002,
"eval_runtime": 14.6554,
"eval_samples_per_second": 13.783,
"eval_steps_per_second": 6.892,
"step": 16
},
{
"epoch": 0.056308654848800835,
"grad_norm": 0.8475841879844666,
"learning_rate": 0.00013090169943749476,
"loss": 2.0277,
"step": 18
},
{
"epoch": 0.06569343065693431,
"grad_norm": 0.7185098528862,
"learning_rate": 8.435655349597689e-05,
"loss": 1.9239,
"step": 21
},
{
"epoch": 0.07507820646506778,
"grad_norm": 0.76585453748703,
"learning_rate": 4.12214747707527e-05,
"loss": 1.9982,
"step": 24
},
{
"epoch": 0.07507820646506778,
"eval_loss": 2.0088443756103516,
"eval_runtime": 14.7377,
"eval_samples_per_second": 13.706,
"eval_steps_per_second": 6.853,
"step": 24
},
{
"epoch": 0.08446298227320125,
"grad_norm": 0.8107510805130005,
"learning_rate": 1.0899347581163221e-05,
"loss": 2.0522,
"step": 27
},
{
"epoch": 0.09384775808133472,
"grad_norm": 0.7503340244293213,
"learning_rate": 0.0,
"loss": 1.9216,
"step": 30
}
],
"logging_steps": 3,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4574107221884928.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}