{
"best_metric": 2.7266805171966553,
"best_model_checkpoint": "miner_id_24/checkpoint-450",
"epoch": 0.02031832035218422,
"eval_steps": 50,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 4.5151823004853824e-05,
"eval_loss": 4.564711093902588,
"eval_runtime": 183.5644,
"eval_samples_per_second": 50.805,
"eval_steps_per_second": 12.704,
"step": 1
},
{
"epoch": 0.0004515182300485382,
"grad_norm": 4.505808353424072,
"learning_rate": 4.1e-05,
"loss": 3.6345,
"step": 10
},
{
"epoch": 0.0009030364600970764,
"grad_norm": 4.865635395050049,
"learning_rate": 8.2e-05,
"loss": 3.4077,
"step": 20
},
{
"epoch": 0.0013545546901456147,
"grad_norm": 5.399093151092529,
"learning_rate": 0.00012299999999999998,
"loss": 3.212,
"step": 30
},
{
"epoch": 0.0018060729201941528,
"grad_norm": 6.037168502807617,
"learning_rate": 0.000164,
"loss": 3.2546,
"step": 40
},
{
"epoch": 0.002257591150242691,
"grad_norm": 8.842162132263184,
"learning_rate": 0.000205,
"loss": 3.2376,
"step": 50
},
{
"epoch": 0.002257591150242691,
"eval_loss": 3.3135757446289062,
"eval_runtime": 185.7849,
"eval_samples_per_second": 50.198,
"eval_steps_per_second": 12.552,
"step": 50
},
{
"epoch": 0.0027091093802912294,
"grad_norm": 4.627725601196289,
"learning_rate": 0.00020475031515163197,
"loss": 3.1244,
"step": 60
},
{
"epoch": 0.0031606276103397675,
"grad_norm": 4.629662990570068,
"learning_rate": 0.00020400247704601096,
"loss": 3.0593,
"step": 70
},
{
"epoch": 0.0036121458403883056,
"grad_norm": 4.690089225769043,
"learning_rate": 0.00020276012907521508,
"loss": 2.9041,
"step": 80
},
{
"epoch": 0.004063664070436844,
"grad_norm": 5.650446891784668,
"learning_rate": 0.00020102932383367768,
"loss": 2.7957,
"step": 90
},
{
"epoch": 0.004515182300485382,
"grad_norm": 6.984936237335205,
"learning_rate": 0.00019881849363055561,
"loss": 3.173,
"step": 100
},
{
"epoch": 0.004515182300485382,
"eval_loss": 3.0686392784118652,
"eval_runtime": 185.0562,
"eval_samples_per_second": 50.395,
"eval_steps_per_second": 12.602,
"step": 100
},
{
"epoch": 0.004966700530533921,
"grad_norm": 6.514296531677246,
"learning_rate": 0.00019613840940836658,
"loss": 2.8477,
"step": 110
},
{
"epoch": 0.005418218760582459,
"grad_norm": 4.205728054046631,
"learning_rate": 0.00019300212826804002,
"loss": 2.9135,
"step": 120
},
{
"epoch": 0.005869736990630997,
"grad_norm": 3.894537925720215,
"learning_rate": 0.00018942492985603364,
"loss": 2.8827,
"step": 130
},
{
"epoch": 0.006321255220679535,
"grad_norm": 5.284046173095703,
"learning_rate": 0.0001854242419234321,
"loss": 3.052,
"step": 140
},
{
"epoch": 0.006772773450728073,
"grad_norm": 5.975098609924316,
"learning_rate": 0.00018101955541969522,
"loss": 3.3612,
"step": 150
},
{
"epoch": 0.006772773450728073,
"eval_loss": 2.9804024696350098,
"eval_runtime": 183.7514,
"eval_samples_per_second": 50.753,
"eval_steps_per_second": 12.691,
"step": 150
},
{
"epoch": 0.007224291680776611,
"grad_norm": 3.6714208126068115,
"learning_rate": 0.00017623232953471174,
"loss": 2.7889,
"step": 160
},
{
"epoch": 0.007675809910825149,
"grad_norm": 4.271055698394775,
"learning_rate": 0.00017108588715178296,
"loss": 2.617,
"step": 170
},
{
"epoch": 0.008127328140873687,
"grad_norm": 4.206169128417969,
"learning_rate": 0.00016560530122087996,
"loss": 2.8274,
"step": 180
},
{
"epoch": 0.008578846370922226,
"grad_norm": 4.710639476776123,
"learning_rate": 0.00015981727260575154,
"loss": 3.0149,
"step": 190
},
{
"epoch": 0.009030364600970764,
"grad_norm": 6.937736511230469,
"learning_rate": 0.00015375,
"loss": 3.3344,
"step": 200
},
{
"epoch": 0.009030364600970764,
"eval_loss": 2.9288151264190674,
"eval_runtime": 184.0887,
"eval_samples_per_second": 50.66,
"eval_steps_per_second": 12.668,
"step": 200
},
{
"epoch": 0.009481882831019302,
"grad_norm": 2.8257806301116943,
"learning_rate": 0.00014743304254588043,
"loss": 2.6591,
"step": 210
},
{
"epoch": 0.009933401061067841,
"grad_norm": 3.0818326473236084,
"learning_rate": 0.000140897175825131,
"loss": 2.7175,
"step": 220
},
{
"epoch": 0.010384919291116379,
"grad_norm": 4.327859878540039,
"learning_rate": 0.0001341742419234321,
"loss": 2.657,
"step": 230
},
{
"epoch": 0.010836437521164918,
"grad_norm": 5.220002174377441,
"learning_rate": 0.00012729699429896594,
"loss": 2.8733,
"step": 240
},
{
"epoch": 0.011287955751213455,
"grad_norm": 5.847310543060303,
"learning_rate": 0.00012029893821086038,
"loss": 2.9887,
"step": 250
},
{
"epoch": 0.011287955751213455,
"eval_loss": 2.8600337505340576,
"eval_runtime": 183.953,
"eval_samples_per_second": 50.698,
"eval_steps_per_second": 12.677,
"step": 250
},
{
"epoch": 0.011739473981261994,
"grad_norm": 2.9359610080718994,
"learning_rate": 0.00011321416748493448,
"loss": 2.7982,
"step": 260
},
{
"epoch": 0.012190992211310531,
"grad_norm": 2.9160430431365967,
"learning_rate": 0.00010607719841200637,
"loss": 2.6641,
"step": 270
},
{
"epoch": 0.01264251044135907,
"grad_norm": 3.4751181602478027,
"learning_rate": 9.892280158799368e-05,
"loss": 2.6787,
"step": 280
},
{
"epoch": 0.013094028671407609,
"grad_norm": 4.7677388191223145,
"learning_rate": 9.178583251506553e-05,
"loss": 2.9618,
"step": 290
},
{
"epoch": 0.013545546901456146,
"grad_norm": 5.446451663970947,
"learning_rate": 8.470106178913964e-05,
"loss": 3.2181,
"step": 300
},
{
"epoch": 0.013545546901456146,
"eval_loss": 2.802130699157715,
"eval_runtime": 181.988,
"eval_samples_per_second": 51.245,
"eval_steps_per_second": 12.814,
"step": 300
},
{
"epoch": 0.013997065131504685,
"grad_norm": 2.6413748264312744,
"learning_rate": 7.770300570103407e-05,
"loss": 2.5587,
"step": 310
},
{
"epoch": 0.014448583361553222,
"grad_norm": 2.954564094543457,
"learning_rate": 7.08257580765679e-05,
"loss": 2.5426,
"step": 320
},
{
"epoch": 0.014900101591601761,
"grad_norm": 4.0135931968688965,
"learning_rate": 6.410282417486901e-05,
"loss": 2.715,
"step": 330
},
{
"epoch": 0.015351619821650299,
"grad_norm": 4.68234395980835,
"learning_rate": 5.756695745411955e-05,
"loss": 2.6955,
"step": 340
},
{
"epoch": 0.015803138051698837,
"grad_norm": 10.842591285705566,
"learning_rate": 5.125000000000002e-05,
"loss": 3.2327,
"step": 350
},
{
"epoch": 0.015803138051698837,
"eval_loss": 2.758694648742676,
"eval_runtime": 188.1269,
"eval_samples_per_second": 49.573,
"eval_steps_per_second": 12.396,
"step": 350
},
{
"epoch": 0.016254656281747375,
"grad_norm": 3.1886544227600098,
"learning_rate": 4.518272739424847e-05,
"loss": 2.5843,
"step": 360
},
{
"epoch": 0.016706174511795915,
"grad_norm": 4.394619941711426,
"learning_rate": 3.9394698779120026e-05,
"loss": 2.6041,
"step": 370
},
{
"epoch": 0.017157692741844453,
"grad_norm": 3.7810781002044678,
"learning_rate": 3.3914112848217066e-05,
"loss": 2.6417,
"step": 380
},
{
"epoch": 0.01760921097189299,
"grad_norm": 4.610939979553223,
"learning_rate": 2.8767670465288276e-05,
"loss": 2.6743,
"step": 390
},
{
"epoch": 0.018060729201941527,
"grad_norm": 5.860191345214844,
"learning_rate": 2.3980444580304764e-05,
"loss": 3.1479,
"step": 400
},
{
"epoch": 0.018060729201941527,
"eval_loss": 2.735464572906494,
"eval_runtime": 185.0743,
"eval_samples_per_second": 50.391,
"eval_steps_per_second": 12.6,
"step": 400
},
{
"epoch": 0.018512247431990068,
"grad_norm": 2.952505588531494,
"learning_rate": 1.9575758076567897e-05,
"loss": 2.5131,
"step": 410
},
{
"epoch": 0.018963765662038605,
"grad_norm": 3.583050012588501,
"learning_rate": 1.557507014396634e-05,
"loss": 2.4948,
"step": 420
},
{
"epoch": 0.019415283892087142,
"grad_norm": 3.9211292266845703,
"learning_rate": 1.1997871731959984e-05,
"loss": 2.6896,
"step": 430
},
{
"epoch": 0.019866802122135683,
"grad_norm": 3.595341444015503,
"learning_rate": 8.861590591633399e-06,
"loss": 2.6609,
"step": 440
},
{
"epoch": 0.02031832035218422,
"grad_norm": 6.866179466247559,
"learning_rate": 6.181506369444397e-06,
"loss": 3.1241,
"step": 450
},
{
"epoch": 0.02031832035218422,
"eval_loss": 2.7266805171966553,
"eval_runtime": 186.0159,
"eval_samples_per_second": 50.135,
"eval_steps_per_second": 12.537,
"step": 450
}
],
"logging_steps": 10,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4059873906524160.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
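
The state above follows the standard Hugging Face Transformers trainer_state.json layout. A minimal sketch (not part of the original file) of how it could be inspected offline; the checkpoint path is an assumption:

# Sketch: load the trainer state and summarize logged losses.
# Assumes the file sits at checkpoint-450/trainer_state.json.
import json

with open("checkpoint-450/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss and evaluation entries; split them apart.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for e in eval_logs:
    print(f"step {e['step']:>4}: eval_loss={e['eval_loss']:.4f}")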