{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.012099213551119177,
  "eval_steps": 13,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00024198427102238354,
      "grad_norm": 2.4724628925323486,
      "learning_rate": 2e-05,
      "loss": 6.8564,
      "step": 1
    },
    {
      "epoch": 0.00024198427102238354,
      "eval_loss": 1.701516032218933,
      "eval_runtime": 34.7817,
      "eval_samples_per_second": 50.026,
      "eval_steps_per_second": 25.013,
      "step": 1
    },
    {
      "epoch": 0.0004839685420447671,
      "grad_norm": 1.9025840759277344,
      "learning_rate": 4e-05,
      "loss": 6.6296,
      "step": 2
    },
    {
      "epoch": 0.0007259528130671506,
      "grad_norm": 2.4124374389648438,
      "learning_rate": 6e-05,
      "loss": 6.2767,
      "step": 3
    },
    {
      "epoch": 0.0009679370840895341,
      "grad_norm": 1.8183839321136475,
      "learning_rate": 8e-05,
      "loss": 6.3779,
      "step": 4
    },
    {
      "epoch": 0.0012099213551119178,
      "grad_norm": 1.4838781356811523,
      "learning_rate": 0.0001,
      "loss": 6.3467,
      "step": 5
    },
    {
      "epoch": 0.0014519056261343012,
      "grad_norm": 2.388962984085083,
      "learning_rate": 0.00012,
      "loss": 6.9645,
      "step": 6
    },
    {
      "epoch": 0.0016938898971566847,
      "grad_norm": 2.148257255554199,
      "learning_rate": 0.00014,
      "loss": 6.1611,
      "step": 7
    },
    {
      "epoch": 0.0019358741681790683,
      "grad_norm": 2.916733980178833,
      "learning_rate": 0.00016,
      "loss": 7.0594,
      "step": 8
    },
    {
      "epoch": 0.002177858439201452,
      "grad_norm": 2.6664950847625732,
      "learning_rate": 0.00018,
      "loss": 6.5461,
      "step": 9
    },
    {
      "epoch": 0.0024198427102238356,
      "grad_norm": 2.471181631088257,
      "learning_rate": 0.0002,
      "loss": 5.6375,
      "step": 10
    },
    {
      "epoch": 0.002661826981246219,
      "grad_norm": 3.933795690536499,
      "learning_rate": 0.0001996917333733128,
      "loss": 5.941,
      "step": 11
    },
    {
      "epoch": 0.0029038112522686023,
      "grad_norm": 4.380200386047363,
      "learning_rate": 0.00019876883405951377,
      "loss": 5.5236,
      "step": 12
    },
    {
      "epoch": 0.003145795523290986,
      "grad_norm": 4.660627365112305,
      "learning_rate": 0.00019723699203976766,
      "loss": 6.6635,
      "step": 13
    },
    {
      "epoch": 0.003145795523290986,
      "eval_loss": 1.5675233602523804,
      "eval_runtime": 34.6237,
      "eval_samples_per_second": 50.255,
      "eval_steps_per_second": 25.127,
      "step": 13
    },
    {
      "epoch": 0.0033877797943133695,
      "grad_norm": 4.443027973175049,
      "learning_rate": 0.00019510565162951537,
      "loss": 6.2077,
      "step": 14
    },
    {
      "epoch": 0.003629764065335753,
      "grad_norm": 3.561443567276001,
      "learning_rate": 0.0001923879532511287,
      "loss": 6.7655,
      "step": 15
    },
    {
      "epoch": 0.0038717483363581366,
      "grad_norm": 4.5063886642456055,
      "learning_rate": 0.0001891006524188368,
      "loss": 6.4769,
      "step": 16
    },
    {
      "epoch": 0.00411373260738052,
      "grad_norm": 4.318703651428223,
      "learning_rate": 0.00018526401643540922,
      "loss": 6.5227,
      "step": 17
    },
    {
      "epoch": 0.004355716878402904,
      "grad_norm": 3.5019469261169434,
      "learning_rate": 0.00018090169943749476,
      "loss": 6.579,
      "step": 18
    },
    {
      "epoch": 0.004597701149425287,
      "grad_norm": 3.5339195728302,
      "learning_rate": 0.0001760405965600031,
      "loss": 6.0713,
      "step": 19
    },
    {
      "epoch": 0.004839685420447671,
      "grad_norm": 3.601715564727783,
      "learning_rate": 0.00017071067811865476,
      "loss": 6.1731,
      "step": 20
    },
    {
      "epoch": 0.005081669691470054,
      "grad_norm": 3.8356359004974365,
      "learning_rate": 0.00016494480483301836,
      "loss": 5.7358,
      "step": 21
    },
    {
      "epoch": 0.005323653962492438,
      "grad_norm": 3.1003851890563965,
      "learning_rate": 0.00015877852522924732,
      "loss": 5.5531,
      "step": 22
    },
    {
      "epoch": 0.0055656382335148216,
      "grad_norm": 2.5003840923309326,
      "learning_rate": 0.0001522498564715949,
      "loss": 4.4929,
      "step": 23
    },
    {
      "epoch": 0.005807622504537205,
      "grad_norm": 3.624629259109497,
      "learning_rate": 0.00014539904997395468,
      "loss": 5.5564,
      "step": 24
    },
    {
      "epoch": 0.006049606775559589,
      "grad_norm": 3.041182041168213,
      "learning_rate": 0.000138268343236509,
      "loss": 5.2167,
      "step": 25
    },
    {
      "epoch": 0.006291591046581972,
      "grad_norm": 3.689633846282959,
      "learning_rate": 0.00013090169943749476,
      "loss": 6.2159,
      "step": 26
    },
    {
      "epoch": 0.006291591046581972,
      "eval_loss": 1.4802727699279785,
      "eval_runtime": 34.6152,
      "eval_samples_per_second": 50.267,
      "eval_steps_per_second": 25.133,
      "step": 26
    },
    {
      "epoch": 0.006533575317604356,
      "grad_norm": 3.0940749645233154,
      "learning_rate": 0.00012334453638559057,
      "loss": 6.2322,
      "step": 27
    },
    {
      "epoch": 0.006775559588626739,
      "grad_norm": 2.8410699367523193,
      "learning_rate": 0.0001156434465040231,
      "loss": 5.9674,
      "step": 28
    },
    {
      "epoch": 0.007017543859649123,
      "grad_norm": 3.7612247467041016,
      "learning_rate": 0.0001078459095727845,
      "loss": 6.2043,
      "step": 29
    },
    {
      "epoch": 0.007259528130671506,
      "grad_norm": 2.866365671157837,
      "learning_rate": 0.0001,
      "loss": 5.4941,
      "step": 30
    },
    {
      "epoch": 0.00750151240169389,
      "grad_norm": 3.132918119430542,
      "learning_rate": 9.215409042721552e-05,
      "loss": 5.7106,
      "step": 31
    },
    {
      "epoch": 0.007743496672716273,
      "grad_norm": 3.546154499053955,
      "learning_rate": 8.435655349597689e-05,
      "loss": 6.2481,
      "step": 32
    },
    {
      "epoch": 0.007985480943738656,
      "grad_norm": 3.2497243881225586,
      "learning_rate": 7.66554636144095e-05,
      "loss": 5.8963,
      "step": 33
    },
    {
      "epoch": 0.00822746521476104,
      "grad_norm": 2.7139110565185547,
      "learning_rate": 6.909830056250527e-05,
      "loss": 5.1744,
      "step": 34
    },
    {
      "epoch": 0.008469449485783424,
      "grad_norm": 3.148625373840332,
      "learning_rate": 6.173165676349103e-05,
      "loss": 5.7334,
      "step": 35
    },
    {
      "epoch": 0.008711433756805808,
      "grad_norm": 3.692917823791504,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 5.1302,
      "step": 36
    },
    {
      "epoch": 0.00895341802782819,
      "grad_norm": 3.0616631507873535,
      "learning_rate": 4.7750143528405126e-05,
      "loss": 5.4077,
      "step": 37
    },
    {
      "epoch": 0.009195402298850575,
      "grad_norm": 2.9874343872070312,
      "learning_rate": 4.12214747707527e-05,
      "loss": 5.458,
      "step": 38
    },
    {
      "epoch": 0.009437386569872959,
      "grad_norm": 3.2835030555725098,
      "learning_rate": 3.5055195166981645e-05,
      "loss": 6.9377,
      "step": 39
    },
    {
      "epoch": 0.009437386569872959,
      "eval_loss": 1.4466110467910767,
      "eval_runtime": 34.6976,
      "eval_samples_per_second": 50.148,
      "eval_steps_per_second": 25.074,
      "step": 39
    },
    {
      "epoch": 0.009679370840895343,
      "grad_norm": 4.230708599090576,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 6.783,
      "step": 40
    },
    {
      "epoch": 0.009921355111917725,
      "grad_norm": 2.771728754043579,
      "learning_rate": 2.3959403439996907e-05,
      "loss": 5.4397,
      "step": 41
    },
    {
      "epoch": 0.010163339382940109,
      "grad_norm": 2.9131264686584473,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 5.1459,
      "step": 42
    },
    {
      "epoch": 0.010405323653962493,
      "grad_norm": 3.516796350479126,
      "learning_rate": 1.4735983564590783e-05,
      "loss": 4.8108,
      "step": 43
    },
    {
      "epoch": 0.010647307924984877,
      "grad_norm": 2.349928379058838,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 5.0233,
      "step": 44
    },
    {
      "epoch": 0.010889292196007259,
      "grad_norm": 3.442686080932617,
      "learning_rate": 7.612046748871327e-06,
      "loss": 5.8234,
      "step": 45
    },
    {
      "epoch": 0.011131276467029643,
      "grad_norm": 4.57712459564209,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 5.9854,
      "step": 46
    },
    {
      "epoch": 0.011373260738052027,
      "grad_norm": 2.746769666671753,
      "learning_rate": 2.7630079602323442e-06,
      "loss": 6.1983,
      "step": 47
    },
    {
      "epoch": 0.01161524500907441,
      "grad_norm": 3.05855655670166,
      "learning_rate": 1.231165940486234e-06,
      "loss": 4.7803,
      "step": 48
    },
    {
      "epoch": 0.011857229280096793,
      "grad_norm": 2.8926703929901123,
      "learning_rate": 3.0826662668720364e-07,
      "loss": 5.5748,
      "step": 49
    },
    {
      "epoch": 0.012099213551119177,
      "grad_norm": 3.569244861602783,
      "learning_rate": 0.0,
      "loss": 5.9647,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1910863162245120.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}