{
  "best_metric": 1.6594650745391846,
  "best_model_checkpoint": "/data/user_data/gonilude/python_and_text_pythia_70m/checkpoint-150",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 237,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_accuracy": 0.22535211267605634,
      "eval_loss": 7.193855285644531,
      "eval_runtime": 0.2991,
      "eval_samples_per_second": 237.392,
      "eval_steps_per_second": 30.092,
      "num_input_tokens_seen": 0,
      "step": 0
    },
    {
      "epoch": 0.012658227848101266,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 8.2293,
      "num_input_tokens_seen": 8192,
      "step": 1
    },
    {
      "epoch": 0.06329113924050633,
      "grad_norm": Infinity,
      "learning_rate": 0.0,
      "loss": 8.6977,
      "num_input_tokens_seen": 40960,
      "step": 5
    },
    {
      "epoch": 0.12658227848101267,
      "grad_norm": 434.0960388183594,
      "learning_rate": 1e-05,
      "loss": 8.1873,
      "num_input_tokens_seen": 81920,
      "step": 10
    },
    {
      "epoch": 0.189873417721519,
      "grad_norm": 457.978271484375,
      "learning_rate": 2e-05,
      "loss": 5.5601,
      "num_input_tokens_seen": 122880,
      "step": 15
    },
    {
      "epoch": 0.25316455696202533,
      "grad_norm": 342.55340576171875,
      "learning_rate": 1.9976483726428423e-05,
      "loss": 3.0969,
      "num_input_tokens_seen": 163840,
      "step": 20
    },
    {
      "epoch": 0.31645569620253167,
      "grad_norm": 1078.478271484375,
      "learning_rate": 1.9939835156657616e-05,
      "loss": 2.4498,
      "num_input_tokens_seen": 204800,
      "step": 25
    },
    {
      "epoch": 0.379746835443038,
      "grad_norm": 316.6920166015625,
      "learning_rate": 1.9841388720031727e-05,
      "loss": 1.9535,
      "num_input_tokens_seen": 245760,
      "step": 30
    },
    {
      "epoch": 0.4430379746835443,
      "grad_norm": 692.9888916015625,
      "learning_rate": 1.9729275686705832e-05,
      "loss": 2.7533,
      "num_input_tokens_seen": 286720,
      "step": 35
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 223.78614807128906,
      "learning_rate": 1.9547993211399753e-05,
      "loss": 1.9428,
      "num_input_tokens_seen": 327680,
      "step": 40
    },
    {
      "epoch": 0.569620253164557,
      "grad_norm": 218.17137145996094,
      "learning_rate": 1.932180409200991e-05,
      "loss": 2.6774,
      "num_input_tokens_seen": 368640,
      "step": 45
    },
    {
      "epoch": 0.6329113924050633,
      "grad_norm": 219.60426330566406,
      "learning_rate": 1.905177215357839e-05,
      "loss": 1.7511,
      "num_input_tokens_seen": 409600,
      "step": 50
    },
    {
      "epoch": 0.6329113924050633,
      "eval_accuracy": 0.14084507042253522,
      "eval_loss": 1.9312864542007446,
      "eval_runtime": 0.1429,
      "eval_samples_per_second": 496.992,
      "eval_steps_per_second": 62.999,
      "num_input_tokens_seen": 409600,
      "step": 50
    },
    {
      "epoch": 0.6962025316455697,
      "grad_norm": 167.01634216308594,
      "learning_rate": 1.8739167425092644e-05,
      "loss": 1.8037,
      "num_input_tokens_seen": 450560,
      "step": 55
    },
    {
      "epoch": 0.759493670886076,
      "grad_norm": 194.86444091796875,
      "learning_rate": 1.838546016621564e-05,
      "loss": 1.7087,
      "num_input_tokens_seen": 491520,
      "step": 60
    },
    {
      "epoch": 0.8227848101265823,
      "grad_norm": 182.9615478515625,
      "learning_rate": 1.7992313952280175e-05,
      "loss": 1.8595,
      "num_input_tokens_seen": 532480,
      "step": 65
    },
    {
      "epoch": 0.8860759493670886,
      "grad_norm": 387.1624450683594,
      "learning_rate": 1.7561577850070355e-05,
      "loss": 1.7605,
      "num_input_tokens_seen": 573440,
      "step": 70
    },
    {
      "epoch": 0.9493670886075949,
      "grad_norm": 115.52404022216797,
      "learning_rate": 1.709527772118953e-05,
      "loss": 1.8325,
      "num_input_tokens_seen": 614400,
      "step": 75
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 225.36166381835938,
      "learning_rate": 1.659560669391714e-05,
      "loss": 1.805,
      "num_input_tokens_seen": 655360,
      "step": 80
    },
    {
      "epoch": 1.0759493670886076,
      "grad_norm": 363.2311096191406,
      "learning_rate": 1.6064914848367818e-05,
      "loss": 2.0046,
      "num_input_tokens_seen": 696320,
      "step": 85
    },
    {
      "epoch": 1.139240506329114,
      "grad_norm": 244.0623016357422,
      "learning_rate": 1.5505698163465986e-05,
      "loss": 1.7745,
      "num_input_tokens_seen": 737280,
      "step": 90
    },
    {
      "epoch": 1.2025316455696202,
      "grad_norm": 201.84446716308594,
      "learning_rate": 1.4920586777721231e-05,
      "loss": 1.7372,
      "num_input_tokens_seen": 778240,
      "step": 95
    },
    {
      "epoch": 1.2658227848101267,
      "grad_norm": 164.17694091796875,
      "learning_rate": 1.4312332619016964e-05,
      "loss": 1.8156,
      "num_input_tokens_seen": 819200,
      "step": 100
    },
    {
      "epoch": 1.2658227848101267,
      "eval_accuracy": 0.22535211267605634,
      "eval_loss": 1.7096410989761353,
      "eval_runtime": 0.1476,
      "eval_samples_per_second": 481.143,
      "eval_steps_per_second": 60.99,
      "num_input_tokens_seen": 819200,
      "step": 100
    },
    {
      "epoch": 1.3291139240506329,
      "grad_norm": 234.53009033203125,
      "learning_rate": 1.3683796461592604e-05,
      "loss": 1.7591,
      "num_input_tokens_seen": 860160,
      "step": 105
    },
    {
      "epoch": 1.3924050632911391,
      "grad_norm": 189.70046997070312,
      "learning_rate": 1.3037934471093683e-05,
      "loss": 1.6518,
      "num_input_tokens_seen": 901120,
      "step": 110
    },
    {
      "epoch": 1.4556962025316456,
      "grad_norm": 167.0009002685547,
      "learning_rate": 1.2377784300971807e-05,
      "loss": 1.7383,
      "num_input_tokens_seen": 942080,
      "step": 115
    },
    {
      "epoch": 1.518987341772152,
      "grad_norm": 210.74073791503906,
      "learning_rate": 1.1706450805626762e-05,
      "loss": 1.6693,
      "num_input_tokens_seen": 983040,
      "step": 120
    },
    {
      "epoch": 1.5822784810126582,
      "grad_norm": 113.87993621826172,
      "learning_rate": 1.1027091437485404e-05,
      "loss": 1.5721,
      "num_input_tokens_seen": 1024000,
      "step": 125
    },
    {
      "epoch": 1.6455696202531644,
      "grad_norm": 175.82958984375,
      "learning_rate": 1.0342901396698658e-05,
      "loss": 1.8908,
      "num_input_tokens_seen": 1064960,
      "step": 130
    },
    {
      "epoch": 1.7088607594936709,
      "grad_norm": 94.99879455566406,
      "learning_rate": 9.657098603301347e-06,
      "loss": 1.7056,
      "num_input_tokens_seen": 1105920,
      "step": 135
    },
    {
      "epoch": 1.7721518987341773,
      "grad_norm": 142.98709106445312,
      "learning_rate": 8.9729085625146e-06,
      "loss": 1.62,
      "num_input_tokens_seen": 1146880,
      "step": 140
    },
    {
      "epoch": 1.8354430379746836,
      "grad_norm": 149.57101440429688,
      "learning_rate": 8.293549194373243e-06,
      "loss": 1.627,
      "num_input_tokens_seen": 1187840,
      "step": 145
    },
    {
      "epoch": 1.8987341772151898,
      "grad_norm": 245.75396728515625,
      "learning_rate": 7.622215699028196e-06,
      "loss": 1.6432,
      "num_input_tokens_seen": 1228800,
      "step": 150
    },
    {
      "epoch": 1.8987341772151898,
      "eval_accuracy": 0.2112676056338028,
      "eval_loss": 1.6594650745391846,
      "eval_runtime": 0.1424,
      "eval_samples_per_second": 498.688,
      "eval_steps_per_second": 63.214,
      "num_input_tokens_seen": 1228800,
      "step": 150
    },
    {
      "epoch": 1.9620253164556962,
      "grad_norm": 89.35352325439453,
      "learning_rate": 6.962065528906321e-06,
      "loss": 1.6411,
      "num_input_tokens_seen": 1269760,
      "step": 155
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 126.32188415527344,
      "learning_rate": 6.316203538407397e-06,
      "loss": 1.5683,
      "num_input_tokens_seen": 1310720,
      "step": 160
    },
    {
      "epoch": 2.088607594936709,
      "grad_norm": 127.23784637451172,
      "learning_rate": 5.687667380983037e-06,
      "loss": 1.7256,
      "num_input_tokens_seen": 1351680,
      "step": 165
    },
    {
      "epoch": 2.151898734177215,
      "grad_norm": 161.1302947998047,
      "learning_rate": 5.07941322227877e-06,
      "loss": 1.6769,
      "num_input_tokens_seen": 1392640,
      "step": 170
    },
    {
      "epoch": 2.2151898734177213,
      "grad_norm": 203.9555206298828,
      "learning_rate": 4.494301836534016e-06,
      "loss": 1.561,
      "num_input_tokens_seen": 1433600,
      "step": 175
    },
    {
      "epoch": 2.278481012658228,
      "grad_norm": 234.33477783203125,
      "learning_rate": 3.935085151632185e-06,
      "loss": 1.5984,
      "num_input_tokens_seen": 1474560,
      "step": 180
    },
    {
      "epoch": 2.3417721518987342,
      "grad_norm": 168.90626525878906,
      "learning_rate": 3.4043933060828606e-06,
      "loss": 1.5909,
      "num_input_tokens_seen": 1515520,
      "step": 185
    },
    {
      "epoch": 2.4050632911392404,
      "grad_norm": 99.04336547851562,
      "learning_rate": 2.9047222788104712e-06,
      "loss": 1.5661,
      "num_input_tokens_seen": 1556480,
      "step": 190
    },
    {
      "epoch": 2.4683544303797467,
      "grad_norm": 272.1407470703125,
      "learning_rate": 2.4384221499296466e-06,
      "loss": 1.5917,
      "num_input_tokens_seen": 1597440,
      "step": 195
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 114.18236541748047,
      "learning_rate": 2.007686047719831e-06,
      "loss": 1.5408,
      "num_input_tokens_seen": 1638400,
      "step": 200
    },
    {
      "epoch": 2.5316455696202533,
      "eval_accuracy": 0.19718309859154928,
      "eval_loss": 1.6633472442626953,
      "eval_runtime": 0.1412,
      "eval_samples_per_second": 502.839,
      "eval_steps_per_second": 63.74,
      "num_input_tokens_seen": 1638400,
      "step": 200
    },
    {
      "epoch": 2.5949367088607596,
      "grad_norm": 157.7032928466797,
      "learning_rate": 1.6145398337843654e-06,
      "loss": 1.5757,
      "num_input_tokens_seen": 1679360,
      "step": 205
    },
    {
      "epoch": 2.6582278481012658,
      "grad_norm": 88.51991271972656,
      "learning_rate": 1.2608325749073591e-06,
      "loss": 1.5955,
      "num_input_tokens_seen": 1720320,
      "step": 210
    },
    {
      "epoch": 2.721518987341772,
      "grad_norm": 139.67247009277344,
      "learning_rate": 9.482278464216121e-07,
      "loss": 1.558,
      "num_input_tokens_seen": 1761280,
      "step": 215
    },
    {
      "epoch": 2.7848101265822782,
      "grad_norm": 130.27508544921875,
      "learning_rate": 6.781959079900958e-07,
      "loss": 1.5464,
      "num_input_tokens_seen": 1802240,
      "step": 220
    },
    {
      "epoch": 2.848101265822785,
      "grad_norm": 163.9243621826172,
      "learning_rate": 4.520067886002488e-07,
      "loss": 1.6119,
      "num_input_tokens_seen": 1843200,
      "step": 225
    },
    {
      "epoch": 2.911392405063291,
      "grad_norm": 116.64250946044922,
      "learning_rate": 2.707243132941717e-07,
      "loss": 1.5334,
      "num_input_tokens_seen": 1884160,
      "step": 230
    },
    {
      "epoch": 2.9746835443037973,
      "grad_norm": 202.17108154296875,
      "learning_rate": 1.3520109972846918e-07,
      "loss": 1.5555,
      "num_input_tokens_seen": 1925120,
      "step": 235
    },
    {
      "epoch": 3.0,
      "num_input_tokens_seen": 1941504,
      "step": 237,
      "total_flos": 220374931341312.0,
      "train_loss": 2.1441552528349157,
      "train_runtime": 22.4706,
      "train_samples_per_second": 84.243,
      "train_steps_per_second": 10.547
    }
  ],
  "logging_steps": 5,
  "max_steps": 237,
  "num_input_tokens_seen": 1941504,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 220374931341312.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}