{
"best_metric": 0.3012233078479767,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.2466091245376079,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004932182490752158,
"grad_norm": 1.8650400638580322,
"learning_rate": 5e-05,
"loss": 1.1438,
"step": 1
},
{
"epoch": 0.004932182490752158,
"eval_loss": 1.414349913597107,
"eval_runtime": 20.2671,
"eval_samples_per_second": 67.4,
"eval_steps_per_second": 8.437,
"step": 1
},
{
"epoch": 0.009864364981504316,
"grad_norm": 1.849665880203247,
"learning_rate": 0.0001,
"loss": 1.1763,
"step": 2
},
{
"epoch": 0.014796547472256474,
"grad_norm": 1.5722370147705078,
"learning_rate": 9.989294616193017e-05,
"loss": 1.1466,
"step": 3
},
{
"epoch": 0.01972872996300863,
"grad_norm": 1.2196251153945923,
"learning_rate": 9.957224306869053e-05,
"loss": 0.9743,
"step": 4
},
{
"epoch": 0.02466091245376079,
"grad_norm": 1.2063157558441162,
"learning_rate": 9.903926402016153e-05,
"loss": 0.8742,
"step": 5
},
{
"epoch": 0.029593094944512947,
"grad_norm": 1.196218729019165,
"learning_rate": 9.829629131445342e-05,
"loss": 0.7981,
"step": 6
},
{
"epoch": 0.0345252774352651,
"grad_norm": 1.059170126914978,
"learning_rate": 9.73465064747553e-05,
"loss": 0.7206,
"step": 7
},
{
"epoch": 0.03945745992601726,
"grad_norm": 1.107353925704956,
"learning_rate": 9.619397662556435e-05,
"loss": 0.6495,
"step": 8
},
{
"epoch": 0.04438964241676942,
"grad_norm": 1.1146334409713745,
"learning_rate": 9.484363707663442e-05,
"loss": 0.6043,
"step": 9
},
{
"epoch": 0.04932182490752158,
"grad_norm": 1.1989785432815552,
"learning_rate": 9.330127018922194e-05,
"loss": 0.5555,
"step": 10
},
{
"epoch": 0.05425400739827373,
"grad_norm": 1.366790533065796,
"learning_rate": 9.157348061512727e-05,
"loss": 0.5734,
"step": 11
},
{
"epoch": 0.059186189889025895,
"grad_norm": 1.1458686590194702,
"learning_rate": 8.966766701456177e-05,
"loss": 0.5739,
"step": 12
},
{
"epoch": 0.06411837237977805,
"grad_norm": 1.9404748678207397,
"learning_rate": 8.759199037394887e-05,
"loss": 0.3946,
"step": 13
},
{
"epoch": 0.0690505548705302,
"grad_norm": 1.2511030435562134,
"learning_rate": 8.535533905932738e-05,
"loss": 0.3832,
"step": 14
},
{
"epoch": 0.07398273736128237,
"grad_norm": 0.8801244497299194,
"learning_rate": 8.296729075500344e-05,
"loss": 0.3734,
"step": 15
},
{
"epoch": 0.07891491985203453,
"grad_norm": 0.7699196934700012,
"learning_rate": 8.043807145043604e-05,
"loss": 0.381,
"step": 16
},
{
"epoch": 0.08384710234278668,
"grad_norm": 0.6188871264457703,
"learning_rate": 7.777851165098012e-05,
"loss": 0.3598,
"step": 17
},
{
"epoch": 0.08877928483353884,
"grad_norm": 0.5674222707748413,
"learning_rate": 7.500000000000001e-05,
"loss": 0.3389,
"step": 18
},
{
"epoch": 0.093711467324291,
"grad_norm": 0.6306641101837158,
"learning_rate": 7.211443451095007e-05,
"loss": 0.3669,
"step": 19
},
{
"epoch": 0.09864364981504316,
"grad_norm": 0.4506845474243164,
"learning_rate": 6.91341716182545e-05,
"loss": 0.3072,
"step": 20
},
{
"epoch": 0.10357583230579531,
"grad_norm": 0.4561832547187805,
"learning_rate": 6.607197326515808e-05,
"loss": 0.3402,
"step": 21
},
{
"epoch": 0.10850801479654747,
"grad_norm": 0.5709429979324341,
"learning_rate": 6.294095225512603e-05,
"loss": 0.3453,
"step": 22
},
{
"epoch": 0.11344019728729964,
"grad_norm": 0.5596646666526794,
"learning_rate": 5.9754516100806423e-05,
"loss": 0.3298,
"step": 23
},
{
"epoch": 0.11837237977805179,
"grad_norm": 0.6210820078849792,
"learning_rate": 5.6526309611002594e-05,
"loss": 0.4192,
"step": 24
},
{
"epoch": 0.12330456226880394,
"grad_norm": 1.0289266109466553,
"learning_rate": 5.327015646150716e-05,
"loss": 0.3351,
"step": 25
},
{
"epoch": 0.12330456226880394,
"eval_loss": 0.32884421944618225,
"eval_runtime": 20.2458,
"eval_samples_per_second": 67.471,
"eval_steps_per_second": 8.446,
"step": 25
},
{
"epoch": 0.1282367447595561,
"grad_norm": 0.9765304327011108,
"learning_rate": 5e-05,
"loss": 0.3001,
"step": 26
},
{
"epoch": 0.13316892725030827,
"grad_norm": 0.5973970890045166,
"learning_rate": 4.6729843538492847e-05,
"loss": 0.3099,
"step": 27
},
{
"epoch": 0.1381011097410604,
"grad_norm": 0.4834292531013489,
"learning_rate": 4.347369038899744e-05,
"loss": 0.3176,
"step": 28
},
{
"epoch": 0.14303329223181258,
"grad_norm": 0.42007580399513245,
"learning_rate": 4.0245483899193595e-05,
"loss": 0.3156,
"step": 29
},
{
"epoch": 0.14796547472256474,
"grad_norm": 0.5430471301078796,
"learning_rate": 3.705904774487396e-05,
"loss": 0.2989,
"step": 30
},
{
"epoch": 0.15289765721331688,
"grad_norm": 0.5922935605049133,
"learning_rate": 3.392802673484193e-05,
"loss": 0.3115,
"step": 31
},
{
"epoch": 0.15782983970406905,
"grad_norm": 0.5440362691879272,
"learning_rate": 3.086582838174551e-05,
"loss": 0.3028,
"step": 32
},
{
"epoch": 0.16276202219482122,
"grad_norm": 0.536367654800415,
"learning_rate": 2.7885565489049946e-05,
"loss": 0.3017,
"step": 33
},
{
"epoch": 0.16769420468557336,
"grad_norm": 0.5181109309196472,
"learning_rate": 2.500000000000001e-05,
"loss": 0.2986,
"step": 34
},
{
"epoch": 0.17262638717632553,
"grad_norm": 0.6546456813812256,
"learning_rate": 2.2221488349019903e-05,
"loss": 0.3319,
"step": 35
},
{
"epoch": 0.17755856966707767,
"grad_norm": 0.6339057087898254,
"learning_rate": 1.9561928549563968e-05,
"loss": 0.3259,
"step": 36
},
{
"epoch": 0.18249075215782984,
"grad_norm": 1.3519337177276611,
"learning_rate": 1.703270924499656e-05,
"loss": 0.3915,
"step": 37
},
{
"epoch": 0.187422934648582,
"grad_norm": 0.47506600618362427,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.2711,
"step": 38
},
{
"epoch": 0.19235511713933415,
"grad_norm": 0.6212289333343506,
"learning_rate": 1.2408009626051137e-05,
"loss": 0.2839,
"step": 39
},
{
"epoch": 0.19728729963008632,
"grad_norm": 0.6477142572402954,
"learning_rate": 1.0332332985438248e-05,
"loss": 0.2859,
"step": 40
},
{
"epoch": 0.20221948212083848,
"grad_norm": 0.8822614550590515,
"learning_rate": 8.426519384872733e-06,
"loss": 0.3087,
"step": 41
},
{
"epoch": 0.20715166461159062,
"grad_norm": 0.7327987551689148,
"learning_rate": 6.698729810778065e-06,
"loss": 0.2934,
"step": 42
},
{
"epoch": 0.2120838471023428,
"grad_norm": 0.5722291469573975,
"learning_rate": 5.156362923365588e-06,
"loss": 0.2892,
"step": 43
},
{
"epoch": 0.21701602959309493,
"grad_norm": 0.6610158085823059,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.3084,
"step": 44
},
{
"epoch": 0.2219482120838471,
"grad_norm": 0.583101212978363,
"learning_rate": 2.653493525244721e-06,
"loss": 0.2954,
"step": 45
},
{
"epoch": 0.22688039457459927,
"grad_norm": 0.5043917298316956,
"learning_rate": 1.70370868554659e-06,
"loss": 0.2806,
"step": 46
},
{
"epoch": 0.2318125770653514,
"grad_norm": 0.624017059803009,
"learning_rate": 9.607359798384785e-07,
"loss": 0.3121,
"step": 47
},
{
"epoch": 0.23674475955610358,
"grad_norm": 0.4712766110897064,
"learning_rate": 4.277569313094809e-07,
"loss": 0.2941,
"step": 48
},
{
"epoch": 0.24167694204685575,
"grad_norm": 0.6848615407943726,
"learning_rate": 1.0705383806982606e-07,
"loss": 0.3745,
"step": 49
},
{
"epoch": 0.2466091245376079,
"grad_norm": 0.3380556106567383,
"learning_rate": 0.0,
"loss": 0.2895,
"step": 50
},
{
"epoch": 0.2466091245376079,
"eval_loss": 0.3012233078479767,
"eval_runtime": 20.2561,
"eval_samples_per_second": 67.437,
"eval_steps_per_second": 8.442,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.80081729709015e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}