{
  "best_metric": 4.964861869812012,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1224552273075157,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002449104546150314,
      "grad_norm": 7.140452861785889,
      "learning_rate": 5e-05,
      "loss": 10.4955,
      "step": 1
    },
    {
      "epoch": 0.002449104546150314,
      "eval_loss": 10.970553398132324,
      "eval_runtime": 2.9369,
      "eval_samples_per_second": 936.705,
      "eval_steps_per_second": 117.131,
      "step": 1
    },
    {
      "epoch": 0.004898209092300628,
      "grad_norm": 6.755414009094238,
      "learning_rate": 0.0001,
      "loss": 10.6407,
      "step": 2
    },
    {
      "epoch": 0.007347313638450942,
      "grad_norm": 5.720281600952148,
      "learning_rate": 9.989294616193017e-05,
      "loss": 9.9123,
      "step": 3
    },
    {
      "epoch": 0.009796418184601256,
      "grad_norm": 5.765336990356445,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.0863,
      "step": 4
    },
    {
      "epoch": 0.012245522730751569,
      "grad_norm": 5.464238166809082,
      "learning_rate": 9.903926402016153e-05,
      "loss": 9.7016,
      "step": 5
    },
    {
      "epoch": 0.014694627276901883,
      "grad_norm": 5.588547706604004,
      "learning_rate": 9.829629131445342e-05,
      "loss": 9.7105,
      "step": 6
    },
    {
      "epoch": 0.017143731823052198,
      "grad_norm": 5.05673360824585,
      "learning_rate": 9.73465064747553e-05,
      "loss": 8.9953,
      "step": 7
    },
    {
      "epoch": 0.01959283636920251,
      "grad_norm": 5.43572473526001,
      "learning_rate": 9.619397662556435e-05,
      "loss": 9.1532,
      "step": 8
    },
    {
      "epoch": 0.022041940915352824,
      "grad_norm": 5.630946636199951,
      "learning_rate": 9.484363707663442e-05,
      "loss": 8.9852,
      "step": 9
    },
    {
      "epoch": 0.024491045461503137,
      "grad_norm": 5.682133197784424,
      "learning_rate": 9.330127018922194e-05,
      "loss": 8.9945,
      "step": 10
    },
    {
      "epoch": 0.02694015000765345,
      "grad_norm": 6.015380859375,
      "learning_rate": 9.157348061512727e-05,
      "loss": 9.1429,
      "step": 11
    },
    {
      "epoch": 0.029389254553803767,
      "grad_norm": 6.519438743591309,
      "learning_rate": 8.966766701456177e-05,
      "loss": 9.1931,
      "step": 12
    },
    {
      "epoch": 0.031838359099954076,
      "grad_norm": 6.041949272155762,
      "learning_rate": 8.759199037394887e-05,
      "loss": 8.4799,
      "step": 13
    },
    {
      "epoch": 0.034287463646104396,
      "grad_norm": 5.421126365661621,
      "learning_rate": 8.535533905932738e-05,
      "loss": 7.4808,
      "step": 14
    },
    {
      "epoch": 0.03673656819225471,
      "grad_norm": 5.560758590698242,
      "learning_rate": 8.296729075500344e-05,
      "loss": 7.4902,
      "step": 15
    },
    {
      "epoch": 0.03918567273840502,
      "grad_norm": 5.815922260284424,
      "learning_rate": 8.043807145043604e-05,
      "loss": 7.4089,
      "step": 16
    },
    {
      "epoch": 0.041634777284555335,
      "grad_norm": 5.995591163635254,
      "learning_rate": 7.777851165098012e-05,
      "loss": 7.2682,
      "step": 17
    },
    {
      "epoch": 0.04408388183070565,
      "grad_norm": 6.248690605163574,
      "learning_rate": 7.500000000000001e-05,
      "loss": 7.2071,
      "step": 18
    },
    {
      "epoch": 0.04653298637685596,
      "grad_norm": 6.144330024719238,
      "learning_rate": 7.211443451095007e-05,
      "loss": 6.785,
      "step": 19
    },
    {
      "epoch": 0.048982090923006275,
      "grad_norm": 6.652873516082764,
      "learning_rate": 6.91341716182545e-05,
      "loss": 6.6433,
      "step": 20
    },
    {
      "epoch": 0.05143119546915659,
      "grad_norm": 6.940776824951172,
      "learning_rate": 6.607197326515808e-05,
      "loss": 6.4511,
      "step": 21
    },
    {
      "epoch": 0.0538803000153069,
      "grad_norm": 7.697096824645996,
      "learning_rate": 6.294095225512603e-05,
      "loss": 6.6621,
      "step": 22
    },
    {
      "epoch": 0.05632940456145722,
      "grad_norm": 8.201127052307129,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 6.4107,
      "step": 23
    },
    {
      "epoch": 0.058778509107607534,
      "grad_norm": 8.622825622558594,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 6.2436,
      "step": 24
    },
    {
      "epoch": 0.06122761365375785,
      "grad_norm": 10.258541107177734,
      "learning_rate": 5.327015646150716e-05,
      "loss": 6.5619,
      "step": 25
    },
    {
      "epoch": 0.06122761365375785,
      "eval_loss": 5.9959306716918945,
      "eval_runtime": 2.9203,
      "eval_samples_per_second": 942.031,
      "eval_steps_per_second": 117.797,
      "step": 25
    },
    {
      "epoch": 0.06367671819990815,
      "grad_norm": 7.211130142211914,
      "learning_rate": 5e-05,
      "loss": 5.6802,
      "step": 26
    },
    {
      "epoch": 0.06612582274605847,
      "grad_norm": 7.553848743438721,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 5.5996,
      "step": 27
    },
    {
      "epoch": 0.06857492729220879,
      "grad_norm": 7.340853691101074,
      "learning_rate": 4.347369038899744e-05,
      "loss": 5.5839,
      "step": 28
    },
    {
      "epoch": 0.0710240318383591,
      "grad_norm": 8.36943244934082,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 5.3825,
      "step": 29
    },
    {
      "epoch": 0.07347313638450942,
      "grad_norm": 7.856369495391846,
      "learning_rate": 3.705904774487396e-05,
      "loss": 5.4483,
      "step": 30
    },
    {
      "epoch": 0.07592224093065973,
      "grad_norm": 8.200360298156738,
      "learning_rate": 3.392802673484193e-05,
      "loss": 5.32,
      "step": 31
    },
    {
      "epoch": 0.07837134547681004,
      "grad_norm": 7.550553798675537,
      "learning_rate": 3.086582838174551e-05,
      "loss": 5.2876,
      "step": 32
    },
    {
      "epoch": 0.08082045002296036,
      "grad_norm": 8.68742561340332,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 5.1647,
      "step": 33
    },
    {
      "epoch": 0.08326955456911067,
      "grad_norm": 7.59812068939209,
      "learning_rate": 2.500000000000001e-05,
      "loss": 5.2607,
      "step": 34
    },
    {
      "epoch": 0.08571865911526098,
      "grad_norm": 6.922670364379883,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 5.166,
      "step": 35
    },
    {
      "epoch": 0.0881677636614113,
      "grad_norm": 6.234425067901611,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 5.1715,
      "step": 36
    },
    {
      "epoch": 0.09061686820756161,
      "grad_norm": 7.038822650909424,
      "learning_rate": 1.703270924499656e-05,
      "loss": 5.2705,
      "step": 37
    },
    {
      "epoch": 0.09306597275371192,
      "grad_norm": 7.074505805969238,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 5.3434,
      "step": 38
    },
    {
      "epoch": 0.09551507729986224,
      "grad_norm": 8.038344383239746,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 5.0634,
      "step": 39
    },
    {
      "epoch": 0.09796418184601255,
      "grad_norm": 7.753593444824219,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 4.8933,
      "step": 40
    },
    {
      "epoch": 0.10041328639216286,
      "grad_norm": 7.461465358734131,
      "learning_rate": 8.426519384872733e-06,
      "loss": 4.8374,
      "step": 41
    },
    {
      "epoch": 0.10286239093831318,
      "grad_norm": 6.135587215423584,
      "learning_rate": 6.698729810778065e-06,
      "loss": 4.8073,
      "step": 42
    },
    {
      "epoch": 0.10531149548446349,
      "grad_norm": 6.440159797668457,
      "learning_rate": 5.156362923365588e-06,
      "loss": 5.0602,
      "step": 43
    },
    {
      "epoch": 0.1077606000306138,
      "grad_norm": 6.288998603820801,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 4.7457,
      "step": 44
    },
    {
      "epoch": 0.11020970457676411,
      "grad_norm": 6.0663957595825195,
      "learning_rate": 2.653493525244721e-06,
      "loss": 4.9725,
      "step": 45
    },
    {
      "epoch": 0.11265880912291444,
      "grad_norm": 5.9057817459106445,
      "learning_rate": 1.70370868554659e-06,
      "loss": 4.7998,
      "step": 46
    },
    {
      "epoch": 0.11510791366906475,
      "grad_norm": 5.905740261077881,
      "learning_rate": 9.607359798384785e-07,
      "loss": 4.836,
      "step": 47
    },
    {
      "epoch": 0.11755701821521507,
      "grad_norm": 6.103787899017334,
      "learning_rate": 4.277569313094809e-07,
      "loss": 5.1476,
      "step": 48
    },
    {
      "epoch": 0.12000612276136538,
      "grad_norm": 6.240080833435059,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 4.9798,
      "step": 49
    },
    {
      "epoch": 0.1224552273075157,
      "grad_norm": 7.454318046569824,
      "learning_rate": 0.0,
      "loss": 5.0739,
      "step": 50
    },
    {
      "epoch": 0.1224552273075157,
      "eval_loss": 4.964861869812012,
      "eval_runtime": 2.9286,
      "eval_samples_per_second": 939.347,
      "eval_steps_per_second": 117.461,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3506283793612800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}