{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.014035087719298246,
  "eval_steps": 5,
  "global_step": 33,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004253056884635832,
      "eval_loss": 2.213833808898926,
      "eval_runtime": 295.9015,
      "eval_samples_per_second": 3.346,
      "eval_steps_per_second": 1.673,
      "step": 1
    },
    {
      "epoch": 0.0012759170653907496,
      "grad_norm": 5.44368314743042,
      "learning_rate": 3.529411764705883e-05,
      "loss": 8.8189,
      "step": 3
    },
    {
      "epoch": 0.002126528442317916,
      "eval_loss": 2.185218572616577,
      "eval_runtime": 298.0199,
      "eval_samples_per_second": 3.322,
      "eval_steps_per_second": 1.661,
      "step": 5
    },
    {
      "epoch": 0.002551834130781499,
      "grad_norm": 6.1356096267700195,
      "learning_rate": 7.058823529411765e-05,
      "loss": 9.273,
      "step": 6
    },
    {
      "epoch": 0.003827751196172249,
      "grad_norm": 5.341865539550781,
      "learning_rate": 0.00010588235294117647,
      "loss": 8.6346,
      "step": 9
    },
    {
      "epoch": 0.004253056884635832,
      "eval_loss": 2.152097225189209,
      "eval_runtime": 297.9685,
      "eval_samples_per_second": 3.322,
      "eval_steps_per_second": 1.661,
      "step": 10
    },
    {
      "epoch": 0.005103668261562998,
      "grad_norm": 6.478035926818848,
      "learning_rate": 0.0001411764705882353,
      "loss": 8.6021,
      "step": 12
    },
    {
      "epoch": 0.006379585326953748,
      "grad_norm": 5.044421195983887,
      "learning_rate": 0.00017647058823529413,
      "loss": 8.3593,
      "step": 15
    },
    {
      "epoch": 0.006379585326953748,
      "eval_loss": 2.1351099014282227,
      "eval_runtime": 297.5366,
      "eval_samples_per_second": 3.327,
      "eval_steps_per_second": 1.664,
      "step": 15
    },
    {
      "epoch": 0.007655502392344498,
      "grad_norm": 4.361161231994629,
      "learning_rate": 0.00019807852804032305,
      "loss": 8.175,
      "step": 18
    },
    {
      "epoch": 0.008506113769271665,
      "eval_loss": 2.126591205596924,
      "eval_runtime": 297.8187,
      "eval_samples_per_second": 3.324,
      "eval_steps_per_second": 1.662,
      "step": 20
    },
    {
      "epoch": 0.008931419457735247,
      "grad_norm": 4.228249549865723,
      "learning_rate": 0.00017071067811865476,
      "loss": 8.1639,
      "step": 21
    },
    {
      "epoch": 0.010207336523125997,
      "grad_norm": 3.8976006507873535,
      "learning_rate": 0.00011950903220161285,
      "loss": 8.1004,
      "step": 24
    },
    {
      "epoch": 0.01063264221158958,
      "eval_loss": 2.1243057250976562,
      "eval_runtime": 298.0859,
      "eval_samples_per_second": 3.321,
      "eval_steps_per_second": 1.661,
      "step": 25
    },
    {
      "epoch": 0.011483253588516746,
      "grad_norm": 4.909876346588135,
      "learning_rate": 6.173165676349103e-05,
      "loss": 8.4362,
      "step": 27
    },
    {
      "epoch": 0.012759170653907496,
      "grad_norm": 3.950687885284424,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 8.2356,
      "step": 30
    },
    {
      "epoch": 0.012759170653907496,
      "eval_loss": 2.118710994720459,
      "eval_runtime": 298.1844,
      "eval_samples_per_second": 3.32,
      "eval_steps_per_second": 1.66,
      "step": 30
    },
    {
      "epoch": 0.014035087719298246,
      "grad_norm": 4.570465564727783,
      "learning_rate": 0.0,
      "loss": 7.8479,
      "step": 33
    }
  ],
  "logging_steps": 3,
  "max_steps": 33,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 17,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.065442902376448e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}