{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0638297872340425,
"eval_steps": 50,
"global_step": 36,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0851063829787234,
"grad_norm": NaN,
"learning_rate": 5.000000000000001e-07,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.0851063829787234,
"eval_loss": NaN,
"eval_runtime": 0.1404,
"eval_samples_per_second": 142.441,
"eval_steps_per_second": 35.61,
"step": 1
},
{
"epoch": 0.1702127659574468,
"grad_norm": NaN,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.0,
"step": 2
},
{
"epoch": 0.2553191489361702,
"grad_norm": NaN,
"learning_rate": 1.5e-06,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.3404255319148936,
"grad_norm": NaN,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0,
"step": 4
},
{
"epoch": 0.425531914893617,
"grad_norm": NaN,
"learning_rate": 2.5e-06,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.5106382978723404,
"grad_norm": NaN,
"learning_rate": 3e-06,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.5957446808510638,
"grad_norm": NaN,
"learning_rate": 3.5e-06,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.6808510638297872,
"grad_norm": NaN,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.7659574468085106,
"grad_norm": NaN,
"learning_rate": 4.5e-06,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.851063829787234,
"grad_norm": NaN,
"learning_rate": 5e-06,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.9361702127659575,
"grad_norm": NaN,
"learning_rate": 4.981772185245135e-06,
"loss": 0.0,
"step": 11
},
{
"epoch": 1.0212765957446808,
"grad_norm": NaN,
"learning_rate": 4.927354543565131e-06,
"loss": 0.0,
"step": 12
},
{
"epoch": 1.1063829787234043,
"grad_norm": NaN,
"learning_rate": 4.837540606713538e-06,
"loss": 0.0,
"step": 13
},
{
"epoch": 1.1914893617021276,
"grad_norm": NaN,
"learning_rate": 4.7136400641330245e-06,
"loss": 0.0,
"step": 14
},
{
"epoch": 1.2765957446808511,
"grad_norm": NaN,
"learning_rate": 4.5574596647341414e-06,
"loss": 0.0,
"step": 15
},
{
"epoch": 1.3617021276595744,
"grad_norm": NaN,
"learning_rate": 4.3712768704277535e-06,
"loss": 0.0,
"step": 16
},
{
"epoch": 1.4468085106382977,
"grad_norm": NaN,
"learning_rate": 4.1578066456019885e-06,
"loss": 0.0,
"step": 17
},
{
"epoch": 1.5319148936170213,
"grad_norm": NaN,
"learning_rate": 3.92016186682789e-06,
"loss": 0.0,
"step": 18
},
{
"epoch": 1.6170212765957448,
"grad_norm": NaN,
"learning_rate": 3.661807930109422e-06,
"loss": 0.0,
"step": 19
},
{
"epoch": 1.702127659574468,
"grad_norm": NaN,
"learning_rate": 3.386512217606339e-06,
"loss": 0.0,
"step": 20
},
{
"epoch": 1.7872340425531914,
"grad_norm": NaN,
"learning_rate": 3.0982891607188948e-06,
"loss": 0.0,
"step": 21
},
{
"epoch": 1.872340425531915,
"grad_norm": NaN,
"learning_rate": 2.8013417006383078e-06,
"loss": 0.0,
"step": 22
},
{
"epoch": 1.9574468085106385,
"grad_norm": NaN,
"learning_rate": 2.5e-06,
"loss": 0.0,
"step": 23
},
{
"epoch": 2.0425531914893615,
"grad_norm": NaN,
"learning_rate": 2.1986582993616926e-06,
"loss": 0.0,
"step": 24
},
{
"epoch": 2.127659574468085,
"grad_norm": NaN,
"learning_rate": 1.9017108392811065e-06,
"loss": 0.0,
"step": 25
},
{
"epoch": 2.2127659574468086,
"grad_norm": NaN,
"learning_rate": 1.613487782393661e-06,
"loss": 0.0,
"step": 26
},
{
"epoch": 2.297872340425532,
"grad_norm": NaN,
"learning_rate": 1.3381920698905788e-06,
"loss": 0.0,
"step": 27
},
{
"epoch": 2.382978723404255,
"grad_norm": NaN,
"learning_rate": 1.079838133172111e-06,
"loss": 0.0,
"step": 28
},
{
"epoch": 2.4680851063829787,
"grad_norm": NaN,
"learning_rate": 8.421933543980126e-07,
"loss": 0.0,
"step": 29
},
{
"epoch": 2.5531914893617023,
"grad_norm": NaN,
"learning_rate": 6.28723129572247e-07,
"loss": 0.0,
"step": 30
},
{
"epoch": 2.6382978723404253,
"grad_norm": NaN,
"learning_rate": 4.4254033526585917e-07,
"loss": 0.0,
"step": 31
},
{
"epoch": 2.723404255319149,
"grad_norm": NaN,
"learning_rate": 2.8635993586697555e-07,
"loss": 0.0,
"step": 32
},
{
"epoch": 2.8085106382978724,
"grad_norm": NaN,
"learning_rate": 1.6245939328646322e-07,
"loss": 0.0,
"step": 33
},
{
"epoch": 2.8936170212765955,
"grad_norm": NaN,
"learning_rate": 7.264545643486997e-08,
"loss": 0.0,
"step": 34
},
{
"epoch": 2.978723404255319,
"grad_norm": NaN,
"learning_rate": 1.822781475486507e-08,
"loss": 0.0,
"step": 35
},
{
"epoch": 3.0638297872340425,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 0.0,
"step": 36
}
],
"logging_steps": 1,
"max_steps": 36,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 44392513536.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}