{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03250344220481683,
  "eval_steps": 8,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00036114935783129813,
      "eval_loss": 2.6933271884918213,
      "eval_runtime": 996.8086,
      "eval_samples_per_second": 9.357,
      "eval_steps_per_second": 4.679,
      "step": 1
    },
    {
      "epoch": 0.0010834480734938942,
      "grad_norm": 0.606799304485321,
      "learning_rate": 3e-05,
      "loss": 2.5701,
      "step": 3
    },
    {
      "epoch": 0.0021668961469877884,
      "grad_norm": 0.6125881671905518,
      "learning_rate": 6e-05,
      "loss": 2.6639,
      "step": 6
    },
    {
      "epoch": 0.002889194862650385,
      "eval_loss": 2.4693448543548584,
      "eval_runtime": 995.3823,
      "eval_samples_per_second": 9.37,
      "eval_steps_per_second": 4.686,
      "step": 8
    },
    {
      "epoch": 0.003250344220481683,
      "grad_norm": 0.5005409121513367,
      "learning_rate": 9e-05,
      "loss": 2.455,
      "step": 9
    },
    {
      "epoch": 0.004333792293975577,
      "grad_norm": 0.5685995817184448,
      "learning_rate": 0.00012,
      "loss": 2.3321,
      "step": 12
    },
    {
      "epoch": 0.005417240367469472,
      "grad_norm": 0.3448821008205414,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.2488,
      "step": 15
    },
    {
      "epoch": 0.00577838972530077,
      "eval_loss": 2.2601964473724365,
      "eval_runtime": 995.6768,
      "eval_samples_per_second": 9.367,
      "eval_steps_per_second": 4.684,
      "step": 16
    },
    {
      "epoch": 0.006500688440963366,
      "grad_norm": 0.3276103436946869,
      "learning_rate": 0.00018,
      "loss": 2.2062,
      "step": 18
    },
    {
      "epoch": 0.007584136514457261,
      "grad_norm": 0.33101603388786316,
      "learning_rate": 0.00019989930665413147,
      "loss": 2.2203,
      "step": 21
    },
    {
      "epoch": 0.008667584587951154,
      "grad_norm": 0.29219868779182434,
      "learning_rate": 0.00019839295885986296,
      "loss": 2.1191,
      "step": 24
    },
    {
      "epoch": 0.008667584587951154,
      "eval_loss": 2.177788496017456,
      "eval_runtime": 996.3567,
      "eval_samples_per_second": 9.361,
      "eval_steps_per_second": 4.681,
      "step": 24
    },
    {
      "epoch": 0.00975103266144505,
      "grad_norm": 0.2517762780189514,
      "learning_rate": 0.00019510565162951537,
      "loss": 2.1499,
      "step": 27
    },
    {
      "epoch": 0.010834480734938944,
      "grad_norm": 0.3143475651741028,
      "learning_rate": 0.0001900968867902419,
      "loss": 2.1606,
      "step": 30
    },
    {
      "epoch": 0.01155677945060154,
      "eval_loss": 2.130772590637207,
      "eval_runtime": 996.0058,
      "eval_samples_per_second": 9.364,
      "eval_steps_per_second": 4.683,
      "step": 32
    },
    {
      "epoch": 0.011917928808432838,
      "grad_norm": 0.28296035528182983,
      "learning_rate": 0.00018345732537213027,
      "loss": 2.0661,
      "step": 33
    },
    {
      "epoch": 0.013001376881926732,
      "grad_norm": 0.293642520904541,
      "learning_rate": 0.00017530714660036112,
      "loss": 2.1699,
      "step": 36
    },
    {
      "epoch": 0.014084824955420626,
      "grad_norm": 0.27195706963539124,
      "learning_rate": 0.00016579387259397127,
      "loss": 2.0588,
      "step": 39
    },
    {
      "epoch": 0.014445974313251925,
      "eval_loss": 2.096694231033325,
      "eval_runtime": 995.8729,
      "eval_samples_per_second": 9.366,
      "eval_steps_per_second": 4.683,
      "step": 40
    },
    {
      "epoch": 0.015168273028914521,
      "grad_norm": 0.2799226641654968,
      "learning_rate": 0.00015508969814521025,
      "loss": 2.0406,
      "step": 42
    },
    {
      "epoch": 0.016251721102408415,
      "grad_norm": 0.29726603627204895,
      "learning_rate": 0.00014338837391175582,
      "loss": 2.1195,
      "step": 45
    },
    {
      "epoch": 0.017335169175902308,
      "grad_norm": 0.278397798538208,
      "learning_rate": 0.00013090169943749476,
      "loss": 2.0464,
      "step": 48
    },
    {
      "epoch": 0.017335169175902308,
      "eval_loss": 2.0712993144989014,
      "eval_runtime": 996.1713,
      "eval_samples_per_second": 9.363,
      "eval_steps_per_second": 4.682,
      "step": 48
    },
    {
      "epoch": 0.018418617249396203,
      "grad_norm": 0.2522198557853699,
      "learning_rate": 0.00011785568947986367,
      "loss": 2.0516,
      "step": 51
    },
    {
      "epoch": 0.0195020653228901,
      "grad_norm": 0.2991744875907898,
      "learning_rate": 0.00010448648303505151,
      "loss": 2.0324,
      "step": 54
    },
    {
      "epoch": 0.020224364038552694,
      "eval_loss": 2.0532875061035156,
      "eval_runtime": 996.415,
      "eval_samples_per_second": 9.361,
      "eval_steps_per_second": 4.681,
      "step": 56
    },
    {
      "epoch": 0.02058551339638399,
      "grad_norm": 0.27935004234313965,
      "learning_rate": 9.103606910965666e-05,
      "loss": 2.0368,
      "step": 57
    },
    {
      "epoch": 0.021668961469877887,
      "grad_norm": 0.29219961166381836,
      "learning_rate": 7.774790660436858e-05,
      "loss": 2.0315,
      "step": 60
    },
    {
      "epoch": 0.02275240954337178,
      "grad_norm": 0.2893526554107666,
      "learning_rate": 6.486251759186572e-05,
      "loss": 2.0128,
      "step": 63
    },
    {
      "epoch": 0.02311355890120308,
      "eval_loss": 2.0402984619140625,
      "eval_runtime": 995.4422,
      "eval_samples_per_second": 9.37,
      "eval_steps_per_second": 4.685,
      "step": 64
    },
    {
      "epoch": 0.023835857616865675,
      "grad_norm": 0.3017539978027344,
      "learning_rate": 5.261313375270014e-05,
      "loss": 2.0256,
      "step": 66
    },
    {
      "epoch": 0.02491930569035957,
      "grad_norm": 0.31945106387138367,
      "learning_rate": 4.12214747707527e-05,
      "loss": 2.0501,
      "step": 69
    },
    {
      "epoch": 0.026002753763853463,
      "grad_norm": 0.2786339819431305,
      "learning_rate": 3.089373510131354e-05,
      "loss": 2.064,
      "step": 72
    },
    {
      "epoch": 0.026002753763853463,
      "eval_loss": 2.03191876411438,
      "eval_runtime": 996.4736,
      "eval_samples_per_second": 9.36,
      "eval_steps_per_second": 4.681,
      "step": 72
    },
    {
      "epoch": 0.02708620183734736,
      "grad_norm": 0.2601235508918762,
      "learning_rate": 2.181685175319702e-05,
      "loss": 2.0599,
      "step": 75
    },
    {
      "epoch": 0.02816964991084125,
      "grad_norm": 0.2593807578086853,
      "learning_rate": 1.415512063981339e-05,
      "loss": 2.0068,
      "step": 78
    },
    {
      "epoch": 0.02889194862650385,
      "eval_loss": 2.0276479721069336,
      "eval_runtime": 997.0668,
      "eval_samples_per_second": 9.354,
      "eval_steps_per_second": 4.678,
      "step": 80
    },
    {
      "epoch": 0.029253097984335147,
      "grad_norm": 0.27310940623283386,
      "learning_rate": 8.047222744854943e-06,
      "loss": 1.9976,
      "step": 81
    },
    {
      "epoch": 0.030336546057829043,
      "grad_norm": 0.2755163013935089,
      "learning_rate": 3.6037139304146762e-06,
      "loss": 2.0511,
      "step": 84
    },
    {
      "epoch": 0.03141999413132294,
      "grad_norm": 0.28938886523246765,
      "learning_rate": 9.0502382320653e-07,
      "loss": 2.0863,
      "step": 87
    },
    {
      "epoch": 0.031781143489154236,
      "eval_loss": 2.0262272357940674,
      "eval_runtime": 996.6015,
      "eval_samples_per_second": 9.359,
      "eval_steps_per_second": 4.68,
      "step": 88
    },
    {
      "epoch": 0.03250344220481683,
      "grad_norm": 0.26352646946907043,
      "learning_rate": 0.0,
      "loss": 1.9889,
      "step": 90
    }
  ],
  "logging_steps": 3,
  "max_steps": 90,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 8,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.788172886114304e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}