{
"best_metric": 1.349143624305725,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.7299270072992701,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014598540145985401,
"grad_norm": 0.3314630389213562,
"learning_rate": 1e-05,
"loss": 1.181,
"step": 1
},
{
"epoch": 0.014598540145985401,
"eval_loss": 1.5133534669876099,
"eval_runtime": 9.2058,
"eval_samples_per_second": 12.601,
"eval_steps_per_second": 3.15,
"step": 1
},
{
"epoch": 0.029197080291970802,
"grad_norm": 0.22670935094356537,
"learning_rate": 2e-05,
"loss": 1.4452,
"step": 2
},
{
"epoch": 0.043795620437956206,
"grad_norm": 0.3789464831352234,
"learning_rate": 3e-05,
"loss": 1.4232,
"step": 3
},
{
"epoch": 0.058394160583941604,
"grad_norm": 0.2621401250362396,
"learning_rate": 4e-05,
"loss": 1.4073,
"step": 4
},
{
"epoch": 0.072992700729927,
"grad_norm": 0.25601255893707275,
"learning_rate": 5e-05,
"loss": 1.344,
"step": 5
},
{
"epoch": 0.08759124087591241,
"grad_norm": 0.2619723677635193,
"learning_rate": 6e-05,
"loss": 1.3306,
"step": 6
},
{
"epoch": 0.10218978102189781,
"grad_norm": 0.2971075177192688,
"learning_rate": 7e-05,
"loss": 1.4307,
"step": 7
},
{
"epoch": 0.11678832116788321,
"grad_norm": 0.3079770803451538,
"learning_rate": 8e-05,
"loss": 1.5267,
"step": 8
},
{
"epoch": 0.13138686131386862,
"grad_norm": 0.3286444842815399,
"learning_rate": 9e-05,
"loss": 1.5575,
"step": 9
},
{
"epoch": 0.145985401459854,
"grad_norm": 0.31285712122917175,
"learning_rate": 0.0001,
"loss": 1.5352,
"step": 10
},
{
"epoch": 0.16058394160583941,
"grad_norm": 0.34175583720207214,
"learning_rate": 9.999316524962345e-05,
"loss": 1.3777,
"step": 11
},
{
"epoch": 0.17518248175182483,
"grad_norm": 0.3486191928386688,
"learning_rate": 9.997266286704631e-05,
"loss": 1.5109,
"step": 12
},
{
"epoch": 0.1897810218978102,
"grad_norm": 0.3993035852909088,
"learning_rate": 9.993849845741524e-05,
"loss": 1.6024,
"step": 13
},
{
"epoch": 0.20437956204379562,
"grad_norm": 0.4641835391521454,
"learning_rate": 9.989068136093873e-05,
"loss": 1.6562,
"step": 14
},
{
"epoch": 0.21897810218978103,
"grad_norm": 0.5170352458953857,
"learning_rate": 9.98292246503335e-05,
"loss": 1.5123,
"step": 15
},
{
"epoch": 0.23357664233576642,
"grad_norm": 0.7671294212341309,
"learning_rate": 9.975414512725057e-05,
"loss": 1.5528,
"step": 16
},
{
"epoch": 0.24817518248175183,
"grad_norm": 1.0964692831039429,
"learning_rate": 9.966546331768191e-05,
"loss": 1.5627,
"step": 17
},
{
"epoch": 0.26277372262773724,
"grad_norm": 0.22408375144004822,
"learning_rate": 9.956320346634876e-05,
"loss": 1.1167,
"step": 18
},
{
"epoch": 0.2773722627737226,
"grad_norm": 0.27535220980644226,
"learning_rate": 9.944739353007344e-05,
"loss": 1.3162,
"step": 19
},
{
"epoch": 0.291970802919708,
"grad_norm": 0.32067593932151794,
"learning_rate": 9.931806517013612e-05,
"loss": 1.4931,
"step": 20
},
{
"epoch": 0.30656934306569344,
"grad_norm": 0.2732066810131073,
"learning_rate": 9.917525374361912e-05,
"loss": 1.4165,
"step": 21
},
{
"epoch": 0.32116788321167883,
"grad_norm": 0.2587907314300537,
"learning_rate": 9.901899829374047e-05,
"loss": 1.3312,
"step": 22
},
{
"epoch": 0.3357664233576642,
"grad_norm": 0.27388066053390503,
"learning_rate": 9.884934153917997e-05,
"loss": 1.2406,
"step": 23
},
{
"epoch": 0.35036496350364965,
"grad_norm": 0.2729540467262268,
"learning_rate": 9.86663298624003e-05,
"loss": 1.2964,
"step": 24
},
{
"epoch": 0.36496350364963503,
"grad_norm": 0.2680898904800415,
"learning_rate": 9.847001329696653e-05,
"loss": 1.3259,
"step": 25
},
{
"epoch": 0.3795620437956204,
"grad_norm": 0.27648648619651794,
"learning_rate": 9.826044551386744e-05,
"loss": 1.5216,
"step": 26
},
{
"epoch": 0.39416058394160586,
"grad_norm": 0.2837010324001312,
"learning_rate": 9.803768380684242e-05,
"loss": 1.4691,
"step": 27
},
{
"epoch": 0.40875912408759124,
"grad_norm": 0.30292704701423645,
"learning_rate": 9.780178907671789e-05,
"loss": 1.4608,
"step": 28
},
{
"epoch": 0.4233576642335766,
"grad_norm": 0.302458792924881,
"learning_rate": 9.755282581475769e-05,
"loss": 1.2839,
"step": 29
},
{
"epoch": 0.43795620437956206,
"grad_norm": 0.3292553424835205,
"learning_rate": 9.729086208503174e-05,
"loss": 1.5101,
"step": 30
},
{
"epoch": 0.45255474452554745,
"grad_norm": 0.4009181559085846,
"learning_rate": 9.701596950580806e-05,
"loss": 1.5497,
"step": 31
},
{
"epoch": 0.46715328467153283,
"grad_norm": 0.42407190799713135,
"learning_rate": 9.672822322997305e-05,
"loss": 1.3821,
"step": 32
},
{
"epoch": 0.48175182481751827,
"grad_norm": 0.595664918422699,
"learning_rate": 9.642770192448536e-05,
"loss": 1.2652,
"step": 33
},
{
"epoch": 0.49635036496350365,
"grad_norm": 1.1629176139831543,
"learning_rate": 9.611448774886924e-05,
"loss": 1.7652,
"step": 34
},
{
"epoch": 0.5109489051094891,
"grad_norm": 0.23664163053035736,
"learning_rate": 9.578866633275288e-05,
"loss": 1.0537,
"step": 35
},
{
"epoch": 0.5255474452554745,
"grad_norm": 0.29486626386642456,
"learning_rate": 9.545032675245813e-05,
"loss": 1.2419,
"step": 36
},
{
"epoch": 0.5401459854014599,
"grad_norm": 0.3429765999317169,
"learning_rate": 9.509956150664796e-05,
"loss": 1.3914,
"step": 37
},
{
"epoch": 0.5547445255474452,
"grad_norm": 0.31401655077934265,
"learning_rate": 9.473646649103818e-05,
"loss": 1.3065,
"step": 38
},
{
"epoch": 0.5693430656934306,
"grad_norm": 0.2617841064929962,
"learning_rate": 9.43611409721806e-05,
"loss": 1.4523,
"step": 39
},
{
"epoch": 0.583941605839416,
"grad_norm": 0.30883294343948364,
"learning_rate": 9.397368756032445e-05,
"loss": 1.4265,
"step": 40
},
{
"epoch": 0.5985401459854015,
"grad_norm": 0.24459517002105713,
"learning_rate": 9.357421218136386e-05,
"loss": 1.2564,
"step": 41
},
{
"epoch": 0.6131386861313869,
"grad_norm": 0.2322646975517273,
"learning_rate": 9.316282404787871e-05,
"loss": 1.2948,
"step": 42
},
{
"epoch": 0.6277372262773723,
"grad_norm": 0.2812507152557373,
"learning_rate": 9.273963562927695e-05,
"loss": 1.6696,
"step": 43
},
{
"epoch": 0.6423357664233577,
"grad_norm": 0.24903182685375214,
"learning_rate": 9.230476262104677e-05,
"loss": 1.5108,
"step": 44
},
{
"epoch": 0.656934306569343,
"grad_norm": 0.2567322254180908,
"learning_rate": 9.185832391312644e-05,
"loss": 1.5481,
"step": 45
},
{
"epoch": 0.6715328467153284,
"grad_norm": 0.2882676124572754,
"learning_rate": 9.140044155740101e-05,
"loss": 1.5023,
"step": 46
},
{
"epoch": 0.6861313868613139,
"grad_norm": 0.307873010635376,
"learning_rate": 9.093124073433463e-05,
"loss": 1.6597,
"step": 47
},
{
"epoch": 0.7007299270072993,
"grad_norm": 0.32634541392326355,
"learning_rate": 9.045084971874738e-05,
"loss": 1.3647,
"step": 48
},
{
"epoch": 0.7153284671532847,
"grad_norm": 0.4027782678604126,
"learning_rate": 8.995939984474624e-05,
"loss": 1.4353,
"step": 49
},
{
"epoch": 0.7299270072992701,
"grad_norm": 0.5115939378738403,
"learning_rate": 8.945702546981969e-05,
"loss": 1.544,
"step": 50
},
{
"epoch": 0.7299270072992701,
"eval_loss": 1.349143624305725,
"eval_runtime": 9.4009,
"eval_samples_per_second": 12.339,
"eval_steps_per_second": 3.085,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.749293335117824e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}