{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.92436974789916, "eval_steps": 8, "global_step": 87, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03361344537815126, "grad_norm": 0.423828125, "learning_rate": 2e-05, "loss": 2.3126, "step": 1 }, { "epoch": 0.03361344537815126, "eval_loss": 2.3783609867095947, "eval_runtime": 10.4273, "eval_samples_per_second": 4.795, "eval_steps_per_second": 0.671, "step": 1 }, { "epoch": 0.06722689075630252, "grad_norm": 0.4375, "learning_rate": 4e-05, "loss": 2.3746, "step": 2 }, { "epoch": 0.10084033613445378, "grad_norm": 0.40625, "learning_rate": 6e-05, "loss": 2.5894, "step": 3 }, { "epoch": 0.13445378151260504, "grad_norm": 0.451171875, "learning_rate": 8e-05, "loss": 2.3395, "step": 4 }, { "epoch": 0.16806722689075632, "grad_norm": 0.40625, "learning_rate": 0.0001, "loss": 2.4187, "step": 5 }, { "epoch": 0.20168067226890757, "grad_norm": 0.423828125, "learning_rate": 0.00012, "loss": 2.4575, "step": 6 }, { "epoch": 0.23529411764705882, "grad_norm": 0.34375, "learning_rate": 0.00014, "loss": 2.1943, "step": 7 }, { "epoch": 0.2689075630252101, "grad_norm": 0.33203125, "learning_rate": 0.00016, "loss": 2.2933, "step": 8 }, { "epoch": 0.2689075630252101, "eval_loss": 2.1781439781188965, "eval_runtime": 10.9815, "eval_samples_per_second": 4.553, "eval_steps_per_second": 0.637, "step": 8 }, { "epoch": 0.3025210084033613, "grad_norm": 0.333984375, "learning_rate": 0.00018, "loss": 2.1869, "step": 9 }, { "epoch": 0.33613445378151263, "grad_norm": 0.279296875, "learning_rate": 0.0002, "loss": 2.297, "step": 10 }, { "epoch": 0.3697478991596639, "grad_norm": 0.30078125, "learning_rate": 0.00019995608365087946, "loss": 2.1138, "step": 11 }, { "epoch": 0.40336134453781514, "grad_norm": 0.259765625, "learning_rate": 0.00019982437317643217, "loss": 2.0531, "step": 12 }, { "epoch": 0.4369747899159664, "grad_norm": 0.3125, "learning_rate": 0.0001996049842615217, "loss": 2.0082, "step": 13 }, { "epoch": 0.47058823529411764, "grad_norm": 0.33984375, "learning_rate": 0.00019929810960135172, "loss": 2.044, "step": 14 }, { "epoch": 0.5042016806722689, "grad_norm": 0.310546875, "learning_rate": 0.0001989040187322164, "loss": 2.0348, "step": 15 }, { "epoch": 0.5378151260504201, "grad_norm": 0.287109375, "learning_rate": 0.00019842305779475968, "loss": 2.0371, "step": 16 }, { "epoch": 0.5378151260504201, "eval_loss": 2.0500473976135254, "eval_runtime": 10.9871, "eval_samples_per_second": 4.551, "eval_steps_per_second": 0.637, "step": 16 }, { "epoch": 0.5714285714285714, "grad_norm": 0.2734375, "learning_rate": 0.0001978556492299504, "loss": 2.0658, "step": 17 }, { "epoch": 0.6050420168067226, "grad_norm": 0.302734375, "learning_rate": 0.0001972022914080411, "loss": 2.126, "step": 18 }, { "epoch": 0.6386554621848739, "grad_norm": 0.2734375, "learning_rate": 0.00019646355819083589, "loss": 2.0384, "step": 19 }, { "epoch": 0.6722689075630253, "grad_norm": 0.251953125, "learning_rate": 0.00019564009842765225, "loss": 2.0598, "step": 20 }, { "epoch": 0.7058823529411765, "grad_norm": 0.27734375, "learning_rate": 0.00019473263538541914, "loss": 2.0817, "step": 21 }, { "epoch": 0.7394957983193278, "grad_norm": 0.267578125, "learning_rate": 0.0001937419661134121, "loss": 2.0959, "step": 22 }, { "epoch": 0.773109243697479, "grad_norm": 0.28125, "learning_rate": 0.00019266896074318334, "loss": 2.1694, "step": 23 }, { "epoch": 0.8067226890756303, "grad_norm": 0.240234375, "learning_rate": 
0.00019151456172430183, "loss": 1.8653, "step": 24 }, { "epoch": 0.8067226890756303, "eval_loss": 2.028913736343384, "eval_runtime": 11.0811, "eval_samples_per_second": 4.512, "eval_steps_per_second": 0.632, "step": 24 }, { "epoch": 0.8403361344537815, "grad_norm": 0.2578125, "learning_rate": 0.00019027978299657436, "loss": 1.979, "step": 25 }, { "epoch": 0.8739495798319328, "grad_norm": 0.2294921875, "learning_rate": 0.00018896570909947475, "loss": 2.0706, "step": 26 }, { "epoch": 0.907563025210084, "grad_norm": 0.26171875, "learning_rate": 0.0001875734942195637, "loss": 2.1814, "step": 27 }, { "epoch": 0.9411764705882353, "grad_norm": 0.255859375, "learning_rate": 0.00018610436117673555, "loss": 1.9951, "step": 28 }, { "epoch": 0.9747899159663865, "grad_norm": 0.267578125, "learning_rate": 0.0001845596003501826, "loss": 2.1463, "step": 29 }, { "epoch": 1.0084033613445378, "grad_norm": 0.27734375, "learning_rate": 0.0001829405685450202, "loss": 2.2055, "step": 30 }, { "epoch": 1.0420168067226891, "grad_norm": 0.208984375, "learning_rate": 0.00018124868780056814, "loss": 2.0328, "step": 31 }, { "epoch": 1.0756302521008403, "grad_norm": 0.2470703125, "learning_rate": 0.00017948544414133534, "loss": 2.0311, "step": 32 }, { "epoch": 1.0756302521008403, "eval_loss": 2.0189168453216553, "eval_runtime": 11.1124, "eval_samples_per_second": 4.499, "eval_steps_per_second": 0.63, "step": 32 }, { "epoch": 1.1092436974789917, "grad_norm": 0.2080078125, "learning_rate": 0.00017765238627180424, "loss": 2.0399, "step": 33 }, { "epoch": 1.1428571428571428, "grad_norm": 0.2265625, "learning_rate": 0.00017575112421616202, "loss": 1.9265, "step": 34 }, { "epoch": 1.1764705882352942, "grad_norm": 0.205078125, "learning_rate": 0.00017378332790417273, "loss": 2.0815, "step": 35 }, { "epoch": 1.2100840336134453, "grad_norm": 0.220703125, "learning_rate": 0.00017175072570443312, "loss": 2.0716, "step": 36 }, { "epoch": 1.2436974789915967, "grad_norm": 0.23828125, "learning_rate": 0.00016965510290629972, "loss": 2.1353, "step": 37 }, { "epoch": 1.2773109243697478, "grad_norm": 0.234375, "learning_rate": 0.00016749830015182107, "loss": 2.037, "step": 38 }, { "epoch": 1.3109243697478992, "grad_norm": 0.2314453125, "learning_rate": 0.00016528221181905217, "loss": 2.1324, "step": 39 }, { "epoch": 1.3445378151260505, "grad_norm": 0.236328125, "learning_rate": 0.00016300878435817113, "loss": 1.9591, "step": 40 }, { "epoch": 1.3445378151260505, "eval_loss": 2.0105881690979004, "eval_runtime": 11.061, "eval_samples_per_second": 4.52, "eval_steps_per_second": 0.633, "step": 40 }, { "epoch": 1.3781512605042017, "grad_norm": 0.2294921875, "learning_rate": 0.00016068001458185936, "loss": 2.0045, "step": 41 }, { "epoch": 1.4117647058823528, "grad_norm": 0.25390625, "learning_rate": 0.0001582979479114472, "loss": 1.8726, "step": 42 }, { "epoch": 1.4453781512605042, "grad_norm": 0.24609375, "learning_rate": 0.00015586467658036524, "loss": 2.1408, "step": 43 }, { "epoch": 1.4789915966386555, "grad_norm": 0.2392578125, "learning_rate": 0.0001533823377964791, "loss": 1.8444, "step": 44 }, { "epoch": 1.5126050420168067, "grad_norm": 0.24609375, "learning_rate": 0.00015085311186492206, "loss": 1.8595, "step": 45 }, { "epoch": 1.5462184873949578, "grad_norm": 0.24609375, "learning_rate": 0.00014827922027307451, "loss": 1.9338, "step": 46 }, { "epoch": 1.5798319327731094, "grad_norm": 0.255859375, "learning_rate": 0.0001456629237393713, "loss": 1.9562, "step": 47 }, { "epoch": 1.6134453781512605, "grad_norm": 0.267578125, 
"learning_rate": 0.00014300652022765207, "loss": 1.994, "step": 48 }, { "epoch": 1.6134453781512605, "eval_loss": 2.010645866394043, "eval_runtime": 10.957, "eval_samples_per_second": 4.563, "eval_steps_per_second": 0.639, "step": 48 }, { "epoch": 1.6470588235294117, "grad_norm": 0.2890625, "learning_rate": 0.00014031234292879725, "loss": 1.9666, "step": 49 }, { "epoch": 1.680672268907563, "grad_norm": 0.26953125, "learning_rate": 0.00013758275821142382, "loss": 1.8065, "step": 50 }, { "epoch": 1.7142857142857144, "grad_norm": 0.30859375, "learning_rate": 0.0001348201635434399, "loss": 2.0882, "step": 51 }, { "epoch": 1.7478991596638656, "grad_norm": 0.2734375, "learning_rate": 0.00013202698538628376, "loss": 1.9619, "step": 52 }, { "epoch": 1.7815126050420167, "grad_norm": 0.271484375, "learning_rate": 0.00012920567706369758, "loss": 2.0829, "step": 53 }, { "epoch": 1.815126050420168, "grad_norm": 0.30859375, "learning_rate": 0.00012635871660690676, "loss": 2.0254, "step": 54 }, { "epoch": 1.8487394957983194, "grad_norm": 0.2734375, "learning_rate": 0.00012348860457809838, "loss": 2.0488, "step": 55 }, { "epoch": 1.8823529411764706, "grad_norm": 0.294921875, "learning_rate": 0.00012059786187410984, "loss": 1.9204, "step": 56 }, { "epoch": 1.8823529411764706, "eval_loss": 2.0145041942596436, "eval_runtime": 10.9679, "eval_samples_per_second": 4.559, "eval_steps_per_second": 0.638, "step": 56 }, { "epoch": 1.9159663865546217, "grad_norm": 0.279296875, "learning_rate": 0.0001176890275122573, "loss": 1.7729, "step": 57 }, { "epoch": 1.949579831932773, "grad_norm": 0.294921875, "learning_rate": 0.00011476465640024814, "loss": 2.0228, "step": 58 }, { "epoch": 1.9831932773109244, "grad_norm": 0.2734375, "learning_rate": 0.00011182731709213659, "loss": 1.869, "step": 59 }, { "epoch": 2.0168067226890756, "grad_norm": 0.353515625, "learning_rate": 0.00010887958953229349, "loss": 1.8303, "step": 60 }, { "epoch": 2.0504201680672267, "grad_norm": 0.294921875, "learning_rate": 0.00010592406278937144, "loss": 2.0863, "step": 61 }, { "epoch": 2.0840336134453783, "grad_norm": 0.275390625, "learning_rate": 0.00010296333278225599, "loss": 1.9723, "step": 62 }, { "epoch": 2.1176470588235294, "grad_norm": 0.3203125, "learning_rate": 0.0001, "loss": 1.9275, "step": 63 }, { "epoch": 2.1512605042016806, "grad_norm": 0.28515625, "learning_rate": 9.703666721774402e-05, "loss": 1.8178, "step": 64 }, { "epoch": 2.1512605042016806, "eval_loss": 2.015535831451416, "eval_runtime": 11.0392, "eval_samples_per_second": 4.529, "eval_steps_per_second": 0.634, "step": 64 }, { "epoch": 2.184873949579832, "grad_norm": 0.30078125, "learning_rate": 9.407593721062859e-05, "loss": 1.9097, "step": 65 }, { "epoch": 2.2184873949579833, "grad_norm": 0.298828125, "learning_rate": 9.112041046770653e-05, "loss": 1.8793, "step": 66 }, { "epoch": 2.2521008403361344, "grad_norm": 0.3359375, "learning_rate": 8.817268290786343e-05, "loss": 1.861, "step": 67 }, { "epoch": 2.2857142857142856, "grad_norm": 0.31640625, "learning_rate": 8.523534359975189e-05, "loss": 1.8363, "step": 68 }, { "epoch": 2.3193277310924367, "grad_norm": 0.31640625, "learning_rate": 8.231097248774274e-05, "loss": 1.8089, "step": 69 }, { "epoch": 2.3529411764705883, "grad_norm": 0.3046875, "learning_rate": 7.940213812589018e-05, "loss": 1.814, "step": 70 }, { "epoch": 2.3865546218487395, "grad_norm": 0.31640625, "learning_rate": 7.651139542190164e-05, "loss": 1.8281, "step": 71 }, { "epoch": 2.4201680672268906, "grad_norm": 0.33203125, "learning_rate": 
7.364128339309326e-05, "loss": 1.8428, "step": 72 }, { "epoch": 2.4201680672268906, "eval_loss": 2.022204637527466, "eval_runtime": 11.0405, "eval_samples_per_second": 4.529, "eval_steps_per_second": 0.634, "step": 72 }, { "epoch": 2.453781512605042, "grad_norm": 0.357421875, "learning_rate": 7.079432293630244e-05, "loss": 1.904, "step": 73 }, { "epoch": 2.4873949579831933, "grad_norm": 0.349609375, "learning_rate": 6.797301461371625e-05, "loss": 1.7909, "step": 74 }, { "epoch": 2.5210084033613445, "grad_norm": 0.375, "learning_rate": 6.517983645656014e-05, "loss": 1.7367, "step": 75 }, { "epoch": 2.5546218487394956, "grad_norm": 0.349609375, "learning_rate": 6.24172417885762e-05, "loss": 1.8942, "step": 76 }, { "epoch": 2.588235294117647, "grad_norm": 0.34765625, "learning_rate": 5.96876570712028e-05, "loss": 1.8393, "step": 77 }, { "epoch": 2.6218487394957983, "grad_norm": 0.35546875, "learning_rate": 5.699347977234799e-05, "loss": 1.8254, "step": 78 }, { "epoch": 2.6554621848739495, "grad_norm": 0.349609375, "learning_rate": 5.43370762606287e-05, "loss": 1.9937, "step": 79 }, { "epoch": 2.689075630252101, "grad_norm": 0.373046875, "learning_rate": 5.172077972692553e-05, "loss": 1.947, "step": 80 }, { "epoch": 2.689075630252101, "eval_loss": 2.026715040206909, "eval_runtime": 10.9879, "eval_samples_per_second": 4.55, "eval_steps_per_second": 0.637, "step": 80 }, { "epoch": 2.722689075630252, "grad_norm": 0.326171875, "learning_rate": 4.914688813507797e-05, "loss": 1.9046, "step": 81 }, { "epoch": 2.7563025210084033, "grad_norm": 0.35546875, "learning_rate": 4.661766220352097e-05, "loss": 1.9969, "step": 82 }, { "epoch": 2.7899159663865545, "grad_norm": 0.361328125, "learning_rate": 4.4135323419634766e-05, "loss": 1.7827, "step": 83 }, { "epoch": 2.8235294117647056, "grad_norm": 0.3515625, "learning_rate": 4.170205208855281e-05, "loss": 1.8887, "step": 84 }, { "epoch": 2.857142857142857, "grad_norm": 0.373046875, "learning_rate": 3.931998541814069e-05, "loss": 1.8327, "step": 85 }, { "epoch": 2.8907563025210083, "grad_norm": 0.37890625, "learning_rate": 3.69912156418289e-05, "loss": 1.9086, "step": 86 }, { "epoch": 2.92436974789916, "grad_norm": 0.390625, "learning_rate": 3.471778818094785e-05, "loss": 1.9553, "step": 87 } ], "logging_steps": 1, "max_steps": 116, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 29, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 5464004832264192.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }
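
The structure above is a Hugging Face `Trainer` state file (`trainer_state.json`): `log_history` interleaves per-step training records (`loss`, `grad_norm`, `learning_rate`) with periodic evaluation records (`eval_loss` every `eval_steps` = 8 steps), followed by run-level metadata (`global_step`, `max_steps`, `train_batch_size`, and so on). As a minimal sketch of how such a file could be post-processed (the filename and the summary printed below are illustrative assumptions, not part of the original run):

```python
import json

# Load the state file dumped by the Hugging Face Trainer
# (the path "trainer_state.json" is an assumption for this sketch).
with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history into per-step training records and evaluation records.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"trained {state['global_step']}/{state['max_steps']} steps "
      f"({state['epoch']:.2f} of {state['num_train_epochs']} epochs)")

# best_metric/best_model_checkpoint are unset in this state, so recover the
# lowest eval_loss directly from the evaluation records.
best = min(eval_log, key=lambda e: e["eval_loss"])
print(f"lowest eval_loss {best['eval_loss']:.4f} at step {best['step']}")

# Print the eval-loss trajectory; in this run it bottoms out around steps 40-48
# and drifts slightly upward afterwards.
for e in eval_log:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}")
```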