{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.08771929824561403,
"eval_steps": 13,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0017543859649122807,
"grad_norm": 8.301186561584473,
"learning_rate": 2e-05,
"loss": 14.8867,
"step": 1
},
{
"epoch": 0.0017543859649122807,
"eval_loss": 3.213578701019287,
"eval_runtime": 4.8661,
"eval_samples_per_second": 49.321,
"eval_steps_per_second": 24.66,
"step": 1
},
{
"epoch": 0.0035087719298245615,
"grad_norm": 7.328826904296875,
"learning_rate": 4e-05,
"loss": 12.4698,
"step": 2
},
{
"epoch": 0.005263157894736842,
"grad_norm": 8.940353393554688,
"learning_rate": 6e-05,
"loss": 13.4153,
"step": 3
},
{
"epoch": 0.007017543859649123,
"grad_norm": 7.479343891143799,
"learning_rate": 8e-05,
"loss": 12.7646,
"step": 4
},
{
"epoch": 0.008771929824561403,
"grad_norm": 8.830097198486328,
"learning_rate": 0.0001,
"loss": 13.7101,
"step": 5
},
{
"epoch": 0.010526315789473684,
"grad_norm": 8.61213493347168,
"learning_rate": 0.00012,
"loss": 12.4308,
"step": 6
},
{
"epoch": 0.012280701754385965,
"grad_norm": 7.831013202667236,
"learning_rate": 0.00014,
"loss": 12.4425,
"step": 7
},
{
"epoch": 0.014035087719298246,
"grad_norm": 7.707040309906006,
"learning_rate": 0.00016,
"loss": 11.3206,
"step": 8
},
{
"epoch": 0.015789473684210527,
"grad_norm": 8.187994003295898,
"learning_rate": 0.00018,
"loss": 11.6318,
"step": 9
},
{
"epoch": 0.017543859649122806,
"grad_norm": 6.9909467697143555,
"learning_rate": 0.0002,
"loss": 9.5057,
"step": 10
},
{
"epoch": 0.01929824561403509,
"grad_norm": 8.407684326171875,
"learning_rate": 0.0001996917333733128,
"loss": 11.0084,
"step": 11
},
{
"epoch": 0.021052631578947368,
"grad_norm": 7.068591117858887,
"learning_rate": 0.00019876883405951377,
"loss": 9.5473,
"step": 12
},
{
"epoch": 0.02280701754385965,
"grad_norm": 7.12372350692749,
"learning_rate": 0.00019723699203976766,
"loss": 8.5598,
"step": 13
},
{
"epoch": 0.02280701754385965,
"eval_loss": 2.0703299045562744,
"eval_runtime": 4.222,
"eval_samples_per_second": 56.845,
"eval_steps_per_second": 28.422,
"step": 13
},
{
"epoch": 0.02456140350877193,
"grad_norm": 6.699890613555908,
"learning_rate": 0.00019510565162951537,
"loss": 7.9868,
"step": 14
},
{
"epoch": 0.02631578947368421,
"grad_norm": 6.799102306365967,
"learning_rate": 0.0001923879532511287,
"loss": 8.6767,
"step": 15
},
{
"epoch": 0.028070175438596492,
"grad_norm": 6.97033166885376,
"learning_rate": 0.0001891006524188368,
"loss": 7.6695,
"step": 16
},
{
"epoch": 0.02982456140350877,
"grad_norm": 7.434906005859375,
"learning_rate": 0.00018526401643540922,
"loss": 7.1493,
"step": 17
},
{
"epoch": 0.031578947368421054,
"grad_norm": 7.40922737121582,
"learning_rate": 0.00018090169943749476,
"loss": 7.2892,
"step": 18
},
{
"epoch": 0.03333333333333333,
"grad_norm": 7.225566387176514,
"learning_rate": 0.0001760405965600031,
"loss": 7.3546,
"step": 19
},
{
"epoch": 0.03508771929824561,
"grad_norm": 8.341033935546875,
"learning_rate": 0.00017071067811865476,
"loss": 7.8588,
"step": 20
},
{
"epoch": 0.03684210526315789,
"grad_norm": 6.631705284118652,
"learning_rate": 0.00016494480483301836,
"loss": 7.1027,
"step": 21
},
{
"epoch": 0.03859649122807018,
"grad_norm": 7.108447551727295,
"learning_rate": 0.00015877852522924732,
"loss": 6.9375,
"step": 22
},
{
"epoch": 0.04035087719298246,
"grad_norm": 6.394927501678467,
"learning_rate": 0.0001522498564715949,
"loss": 6.8771,
"step": 23
},
{
"epoch": 0.042105263157894736,
"grad_norm": 5.567875862121582,
"learning_rate": 0.00014539904997395468,
"loss": 6.5752,
"step": 24
},
{
"epoch": 0.043859649122807015,
"grad_norm": 6.8372626304626465,
"learning_rate": 0.000138268343236509,
"loss": 6.7518,
"step": 25
},
{
"epoch": 0.0456140350877193,
"grad_norm": 5.824334144592285,
"learning_rate": 0.00013090169943749476,
"loss": 7.0496,
"step": 26
},
{
"epoch": 0.0456140350877193,
"eval_loss": 1.7120705842971802,
"eval_runtime": 4.2762,
"eval_samples_per_second": 56.125,
"eval_steps_per_second": 28.062,
"step": 26
},
{
"epoch": 0.04736842105263158,
"grad_norm": 6.506723880767822,
"learning_rate": 0.00012334453638559057,
"loss": 6.5923,
"step": 27
},
{
"epoch": 0.04912280701754386,
"grad_norm": 5.621894359588623,
"learning_rate": 0.0001156434465040231,
"loss": 6.1451,
"step": 28
},
{
"epoch": 0.05087719298245614,
"grad_norm": 6.360438346862793,
"learning_rate": 0.0001078459095727845,
"loss": 7.1938,
"step": 29
},
{
"epoch": 0.05263157894736842,
"grad_norm": 6.532863616943359,
"learning_rate": 0.0001,
"loss": 6.9762,
"step": 30
},
{
"epoch": 0.054385964912280704,
"grad_norm": 6.711584091186523,
"learning_rate": 9.215409042721552e-05,
"loss": 7.1213,
"step": 31
},
{
"epoch": 0.056140350877192984,
"grad_norm": 6.394320487976074,
"learning_rate": 8.435655349597689e-05,
"loss": 7.1146,
"step": 32
},
{
"epoch": 0.05789473684210526,
"grad_norm": 6.20180082321167,
"learning_rate": 7.66554636144095e-05,
"loss": 7.1802,
"step": 33
},
{
"epoch": 0.05964912280701754,
"grad_norm": 6.715718746185303,
"learning_rate": 6.909830056250527e-05,
"loss": 7.0011,
"step": 34
},
{
"epoch": 0.06140350877192982,
"grad_norm": 6.306797027587891,
"learning_rate": 6.173165676349103e-05,
"loss": 7.1968,
"step": 35
},
{
"epoch": 0.06315789473684211,
"grad_norm": 7.334612846374512,
"learning_rate": 5.4600950026045326e-05,
"loss": 6.2764,
"step": 36
},
{
"epoch": 0.06491228070175438,
"grad_norm": 6.493529796600342,
"learning_rate": 4.7750143528405126e-05,
"loss": 6.5648,
"step": 37
},
{
"epoch": 0.06666666666666667,
"grad_norm": 5.998836994171143,
"learning_rate": 4.12214747707527e-05,
"loss": 5.7859,
"step": 38
},
{
"epoch": 0.06842105263157895,
"grad_norm": 6.2978925704956055,
"learning_rate": 3.5055195166981645e-05,
"loss": 6.1908,
"step": 39
},
{
"epoch": 0.06842105263157895,
"eval_loss": 1.6301758289337158,
"eval_runtime": 4.3085,
"eval_samples_per_second": 55.704,
"eval_steps_per_second": 27.852,
"step": 39
},
{
"epoch": 0.07017543859649122,
"grad_norm": 6.921982765197754,
"learning_rate": 2.9289321881345254e-05,
"loss": 7.4431,
"step": 40
},
{
"epoch": 0.07192982456140351,
"grad_norm": 5.440037727355957,
"learning_rate": 2.3959403439996907e-05,
"loss": 6.0957,
"step": 41
},
{
"epoch": 0.07368421052631578,
"grad_norm": 6.525667667388916,
"learning_rate": 1.9098300562505266e-05,
"loss": 7.1308,
"step": 42
},
{
"epoch": 0.07543859649122807,
"grad_norm": 6.312617301940918,
"learning_rate": 1.4735983564590783e-05,
"loss": 6.5642,
"step": 43
},
{
"epoch": 0.07719298245614035,
"grad_norm": 6.488813400268555,
"learning_rate": 1.0899347581163221e-05,
"loss": 5.9102,
"step": 44
},
{
"epoch": 0.07894736842105263,
"grad_norm": 6.369126319885254,
"learning_rate": 7.612046748871327e-06,
"loss": 6.7954,
"step": 45
},
{
"epoch": 0.08070175438596491,
"grad_norm": 5.541263103485107,
"learning_rate": 4.8943483704846475e-06,
"loss": 6.9496,
"step": 46
},
{
"epoch": 0.0824561403508772,
"grad_norm": 5.264928340911865,
"learning_rate": 2.7630079602323442e-06,
"loss": 6.3512,
"step": 47
},
{
"epoch": 0.08421052631578947,
"grad_norm": 5.724519729614258,
"learning_rate": 1.231165940486234e-06,
"loss": 6.058,
"step": 48
},
{
"epoch": 0.08596491228070176,
"grad_norm": 5.485682010650635,
"learning_rate": 3.0826662668720364e-07,
"loss": 5.7932,
"step": 49
},
{
"epoch": 0.08771929824561403,
"grad_norm": 6.535744667053223,
"learning_rate": 0.0,
"loss": 6.6898,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3264746618880000.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}