{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.0, "eval_steps": 500, "global_step": 918, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.032679738562091505, "grad_norm": 13.884983365234152, "learning_rate": 5e-06, "loss": 0.9132, "step": 10 }, { "epoch": 0.06535947712418301, "grad_norm": 4.396307073532539, "learning_rate": 5e-06, "loss": 0.793, "step": 20 }, { "epoch": 0.09803921568627451, "grad_norm": 1.5701715316993357, "learning_rate": 5e-06, "loss": 0.7554, "step": 30 }, { "epoch": 0.13071895424836602, "grad_norm": 1.0469284618172925, "learning_rate": 5e-06, "loss": 0.7308, "step": 40 }, { "epoch": 0.16339869281045752, "grad_norm": 1.025316533729498, "learning_rate": 5e-06, "loss": 0.7074, "step": 50 }, { "epoch": 0.19607843137254902, "grad_norm": 0.8007907881089358, "learning_rate": 5e-06, "loss": 0.6935, "step": 60 }, { "epoch": 0.22875816993464052, "grad_norm": 0.7336515686610362, "learning_rate": 5e-06, "loss": 0.6863, "step": 70 }, { "epoch": 0.26143790849673204, "grad_norm": 0.6654762709275407, "learning_rate": 5e-06, "loss": 0.6737, "step": 80 }, { "epoch": 0.29411764705882354, "grad_norm": 0.6752715837026988, "learning_rate": 5e-06, "loss": 0.678, "step": 90 }, { "epoch": 0.32679738562091504, "grad_norm": 0.6495908393250515, "learning_rate": 5e-06, "loss": 0.6625, "step": 100 }, { "epoch": 0.35947712418300654, "grad_norm": 0.5513534888680728, "learning_rate": 5e-06, "loss": 0.6728, "step": 110 }, { "epoch": 0.39215686274509803, "grad_norm": 0.5987482687514573, "learning_rate": 5e-06, "loss": 0.6605, "step": 120 }, { "epoch": 0.42483660130718953, "grad_norm": 0.8982453869254353, "learning_rate": 5e-06, "loss": 0.6576, "step": 130 }, { "epoch": 0.45751633986928103, "grad_norm": 0.5729452075955007, "learning_rate": 5e-06, "loss": 0.6581, "step": 140 }, { "epoch": 0.49019607843137253, "grad_norm": 0.6356665135318063, "learning_rate": 5e-06, "loss": 0.6488, "step": 150 }, { "epoch": 0.5228758169934641, "grad_norm": 0.8565590645061701, "learning_rate": 5e-06, "loss": 0.6611, "step": 160 }, { "epoch": 0.5555555555555556, "grad_norm": 0.7114568602994797, "learning_rate": 5e-06, "loss": 0.6448, "step": 170 }, { "epoch": 0.5882352941176471, "grad_norm": 0.6611333193380935, "learning_rate": 5e-06, "loss": 0.6478, "step": 180 }, { "epoch": 0.6209150326797386, "grad_norm": 0.6896881110018678, "learning_rate": 5e-06, "loss": 0.6495, "step": 190 }, { "epoch": 0.6535947712418301, "grad_norm": 0.6559022818956797, "learning_rate": 5e-06, "loss": 0.6501, "step": 200 }, { "epoch": 0.6862745098039216, "grad_norm": 0.557250562288118, "learning_rate": 5e-06, "loss": 0.6462, "step": 210 }, { "epoch": 0.7189542483660131, "grad_norm": 0.546010994925319, "learning_rate": 5e-06, "loss": 0.6447, "step": 220 }, { "epoch": 0.7516339869281046, "grad_norm": 0.6551946905014553, "learning_rate": 5e-06, "loss": 0.6441, "step": 230 }, { "epoch": 0.7843137254901961, "grad_norm": 0.7098356396395941, "learning_rate": 5e-06, "loss": 0.6404, "step": 240 }, { "epoch": 0.8169934640522876, "grad_norm": 0.6955204100907911, "learning_rate": 5e-06, "loss": 0.6328, "step": 250 }, { "epoch": 0.8496732026143791, "grad_norm": 0.8535008368638369, "learning_rate": 5e-06, "loss": 0.6376, "step": 260 }, { "epoch": 0.8823529411764706, "grad_norm": 0.9070401054614784, "learning_rate": 5e-06, "loss": 0.639, "step": 270 }, { "epoch": 0.9150326797385621, "grad_norm": 1.565789969572099, "learning_rate": 5e-06, "loss": 0.6359, "step": 280 }, { 
"epoch": 0.9477124183006536, "grad_norm": 1.2559271663411875, "learning_rate": 5e-06, "loss": 0.634, "step": 290 }, { "epoch": 0.9803921568627451, "grad_norm": 0.7809173302366459, "learning_rate": 5e-06, "loss": 0.6319, "step": 300 }, { "epoch": 1.0, "eval_loss": 0.6343755125999451, "eval_runtime": 30.4377, "eval_samples_per_second": 270.75, "eval_steps_per_second": 1.084, "step": 306 }, { "epoch": 1.0130718954248366, "grad_norm": 0.8552427081362785, "learning_rate": 5e-06, "loss": 0.613, "step": 310 }, { "epoch": 1.0457516339869282, "grad_norm": 0.7483473424289742, "learning_rate": 5e-06, "loss": 0.5907, "step": 320 }, { "epoch": 1.0784313725490196, "grad_norm": 0.954467001493486, "learning_rate": 5e-06, "loss": 0.5879, "step": 330 }, { "epoch": 1.1111111111111112, "grad_norm": 0.5442657939315798, "learning_rate": 5e-06, "loss": 0.5885, "step": 340 }, { "epoch": 1.1437908496732025, "grad_norm": 0.7256618392119529, "learning_rate": 5e-06, "loss": 0.5913, "step": 350 }, { "epoch": 1.1764705882352942, "grad_norm": 0.6093376203471469, "learning_rate": 5e-06, "loss": 0.5941, "step": 360 }, { "epoch": 1.2091503267973855, "grad_norm": 0.563669838714874, "learning_rate": 5e-06, "loss": 0.5906, "step": 370 }, { "epoch": 1.2418300653594772, "grad_norm": 0.6235844786429067, "learning_rate": 5e-06, "loss": 0.5912, "step": 380 }, { "epoch": 1.2745098039215685, "grad_norm": 0.7954936323443693, "learning_rate": 5e-06, "loss": 0.5854, "step": 390 }, { "epoch": 1.3071895424836601, "grad_norm": 0.5900417117740443, "learning_rate": 5e-06, "loss": 0.5902, "step": 400 }, { "epoch": 1.3398692810457518, "grad_norm": 0.5649519787360927, "learning_rate": 5e-06, "loss": 0.5929, "step": 410 }, { "epoch": 1.3725490196078431, "grad_norm": 0.5532481138751179, "learning_rate": 5e-06, "loss": 0.5949, "step": 420 }, { "epoch": 1.4052287581699345, "grad_norm": 0.5796446258489036, "learning_rate": 5e-06, "loss": 0.591, "step": 430 }, { "epoch": 1.4379084967320261, "grad_norm": 0.5180305142972244, "learning_rate": 5e-06, "loss": 0.5955, "step": 440 }, { "epoch": 1.4705882352941178, "grad_norm": 0.5316504409753056, "learning_rate": 5e-06, "loss": 0.5916, "step": 450 }, { "epoch": 1.5032679738562091, "grad_norm": 0.5669201774818233, "learning_rate": 5e-06, "loss": 0.5884, "step": 460 }, { "epoch": 1.5359477124183005, "grad_norm": 0.5474083628106629, "learning_rate": 5e-06, "loss": 0.5896, "step": 470 }, { "epoch": 1.5686274509803921, "grad_norm": 0.5404778686275579, "learning_rate": 5e-06, "loss": 0.593, "step": 480 }, { "epoch": 1.6013071895424837, "grad_norm": 0.5872446910510764, "learning_rate": 5e-06, "loss": 0.5921, "step": 490 }, { "epoch": 1.6339869281045751, "grad_norm": 0.6115573952876731, "learning_rate": 5e-06, "loss": 0.5916, "step": 500 }, { "epoch": 1.6666666666666665, "grad_norm": 0.6081257294896645, "learning_rate": 5e-06, "loss": 0.5903, "step": 510 }, { "epoch": 1.6993464052287581, "grad_norm": 0.5308744191347566, "learning_rate": 5e-06, "loss": 0.5859, "step": 520 }, { "epoch": 1.7320261437908497, "grad_norm": 0.5659221276589558, "learning_rate": 5e-06, "loss": 0.5916, "step": 530 }, { "epoch": 1.7647058823529411, "grad_norm": 0.5192160551754689, "learning_rate": 5e-06, "loss": 0.5914, "step": 540 }, { "epoch": 1.7973856209150327, "grad_norm": 0.6988165837234015, "learning_rate": 5e-06, "loss": 0.5866, "step": 550 }, { "epoch": 1.8300653594771243, "grad_norm": 0.5487420924541063, "learning_rate": 5e-06, "loss": 0.5884, "step": 560 }, { "epoch": 1.8627450980392157, "grad_norm": 0.5682797543154547, 
"learning_rate": 5e-06, "loss": 0.5909, "step": 570 }, { "epoch": 1.8954248366013071, "grad_norm": 0.5077833002974906, "learning_rate": 5e-06, "loss": 0.5973, "step": 580 }, { "epoch": 1.9281045751633987, "grad_norm": 0.5129649155292699, "learning_rate": 5e-06, "loss": 0.5906, "step": 590 }, { "epoch": 1.9607843137254903, "grad_norm": 0.6199185848699229, "learning_rate": 5e-06, "loss": 0.5897, "step": 600 }, { "epoch": 1.9934640522875817, "grad_norm": 0.6367349679762161, "learning_rate": 5e-06, "loss": 0.5863, "step": 610 }, { "epoch": 2.0, "eval_loss": 0.6249033808708191, "eval_runtime": 29.8328, "eval_samples_per_second": 276.24, "eval_steps_per_second": 1.106, "step": 612 }, { "epoch": 2.026143790849673, "grad_norm": 0.7411071608832002, "learning_rate": 5e-06, "loss": 0.5586, "step": 620 }, { "epoch": 2.0588235294117645, "grad_norm": 0.5717129772857855, "learning_rate": 5e-06, "loss": 0.5407, "step": 630 }, { "epoch": 2.0915032679738563, "grad_norm": 0.6346534892777344, "learning_rate": 5e-06, "loss": 0.5442, "step": 640 }, { "epoch": 2.1241830065359477, "grad_norm": 0.5360417918450974, "learning_rate": 5e-06, "loss": 0.5411, "step": 650 }, { "epoch": 2.156862745098039, "grad_norm": 0.6070614915525901, "learning_rate": 5e-06, "loss": 0.5495, "step": 660 }, { "epoch": 2.189542483660131, "grad_norm": 0.5484881674300732, "learning_rate": 5e-06, "loss": 0.5477, "step": 670 }, { "epoch": 2.2222222222222223, "grad_norm": 0.5559727309246413, "learning_rate": 5e-06, "loss": 0.5453, "step": 680 }, { "epoch": 2.2549019607843137, "grad_norm": 0.6149254917908307, "learning_rate": 5e-06, "loss": 0.5491, "step": 690 }, { "epoch": 2.287581699346405, "grad_norm": 0.7434819330252584, "learning_rate": 5e-06, "loss": 0.5411, "step": 700 }, { "epoch": 2.3202614379084965, "grad_norm": 0.6277100245140601, "learning_rate": 5e-06, "loss": 0.539, "step": 710 }, { "epoch": 2.3529411764705883, "grad_norm": 0.6071082517695824, "learning_rate": 5e-06, "loss": 0.546, "step": 720 }, { "epoch": 2.3856209150326797, "grad_norm": 0.5763605350200824, "learning_rate": 5e-06, "loss": 0.5424, "step": 730 }, { "epoch": 2.418300653594771, "grad_norm": 0.6503780611807637, "learning_rate": 5e-06, "loss": 0.5467, "step": 740 }, { "epoch": 2.450980392156863, "grad_norm": 0.6953693200719112, "learning_rate": 5e-06, "loss": 0.551, "step": 750 }, { "epoch": 2.4836601307189543, "grad_norm": 0.582324537988359, "learning_rate": 5e-06, "loss": 0.5454, "step": 760 }, { "epoch": 2.5163398692810457, "grad_norm": 0.5697978800379555, "learning_rate": 5e-06, "loss": 0.546, "step": 770 }, { "epoch": 2.549019607843137, "grad_norm": 0.588747491644582, "learning_rate": 5e-06, "loss": 0.5434, "step": 780 }, { "epoch": 2.581699346405229, "grad_norm": 0.5761332349094056, "learning_rate": 5e-06, "loss": 0.5515, "step": 790 }, { "epoch": 2.6143790849673203, "grad_norm": 0.6003606940754802, "learning_rate": 5e-06, "loss": 0.5513, "step": 800 }, { "epoch": 2.6470588235294117, "grad_norm": 0.5450155999410424, "learning_rate": 5e-06, "loss": 0.5472, "step": 810 }, { "epoch": 2.6797385620915035, "grad_norm": 0.7041726525142917, "learning_rate": 5e-06, "loss": 0.5496, "step": 820 }, { "epoch": 2.712418300653595, "grad_norm": 0.5257674189970212, "learning_rate": 5e-06, "loss": 0.5444, "step": 830 }, { "epoch": 2.7450980392156863, "grad_norm": 0.5960484874019031, "learning_rate": 5e-06, "loss": 0.5501, "step": 840 }, { "epoch": 2.7777777777777777, "grad_norm": 0.5362794145960937, "learning_rate": 5e-06, "loss": 0.5477, "step": 850 }, { "epoch": 
2.810457516339869, "grad_norm": 0.5503663269672819, "learning_rate": 5e-06, "loss": 0.5472, "step": 860 }, { "epoch": 2.843137254901961, "grad_norm": 0.5500519539060107, "learning_rate": 5e-06, "loss": 0.5512, "step": 870 }, { "epoch": 2.8758169934640523, "grad_norm": 0.6512147682006916, "learning_rate": 5e-06, "loss": 0.5521, "step": 880 }, { "epoch": 2.9084967320261437, "grad_norm": 0.5740057468439662, "learning_rate": 5e-06, "loss": 0.5449, "step": 890 }, { "epoch": 2.9411764705882355, "grad_norm": 0.5364033145980043, "learning_rate": 5e-06, "loss": 0.5498, "step": 900 }, { "epoch": 2.973856209150327, "grad_norm": 0.5484821254955965, "learning_rate": 5e-06, "loss": 0.5495, "step": 910 }, { "epoch": 3.0, "eval_loss": 0.628399670124054, "eval_runtime": 29.5588, "eval_samples_per_second": 278.8, "eval_steps_per_second": 1.116, "step": 918 }, { "epoch": 3.0, "step": 918, "total_flos": 1537684191313920.0, "train_loss": 0.6034856663290452, "train_runtime": 5898.5619, "train_samples_per_second": 79.628, "train_steps_per_second": 0.156 } ], "logging_steps": 10, "max_steps": 918, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1537684191313920.0, "train_batch_size": 16, "trial_name": null, "trial_params": null }