{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9952941176470587,
"eval_steps": 500,
"global_step": 212,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.047058823529411764,
"grad_norm": 0.8513572216033936,
"learning_rate": 4.9931407070965254e-05,
"loss": 0.989,
"num_input_tokens_seen": 97856,
"step": 5
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.5562562942504883,
"learning_rate": 4.97260046830541e-05,
"loss": 0.7203,
"num_input_tokens_seen": 192320,
"step": 10
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.44545215368270874,
"learning_rate": 4.9384919968379945e-05,
"loss": 0.5976,
"num_input_tokens_seen": 288720,
"step": 15
},
{
"epoch": 0.18823529411764706,
"grad_norm": 0.3797588050365448,
"learning_rate": 4.891002460691306e-05,
"loss": 0.5496,
"num_input_tokens_seen": 389008,
"step": 20
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.3744553327560425,
"learning_rate": 4.83039245557597e-05,
"loss": 0.512,
"num_input_tokens_seen": 485488,
"step": 25
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.33631229400634766,
"learning_rate": 4.756994574914359e-05,
"loss": 0.4675,
"num_input_tokens_seen": 582752,
"step": 30
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.3476051986217499,
"learning_rate": 4.6712115847560355e-05,
"loss": 0.4507,
"num_input_tokens_seen": 684016,
"step": 35
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.3526253402233124,
"learning_rate": 4.573514213625505e-05,
"loss": 0.436,
"num_input_tokens_seen": 782848,
"step": 40
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.34273335337638855,
"learning_rate": 4.464438569430354e-05,
"loss": 0.4339,
"num_input_tokens_seen": 885520,
"step": 45
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.31332990527153015,
"learning_rate": 4.344583197604318e-05,
"loss": 0.4056,
"num_input_tokens_seen": 985248,
"step": 50
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.38881802558898926,
"learning_rate": 4.214605796628527e-05,
"loss": 0.412,
"num_input_tokens_seen": 1086800,
"step": 55
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.37956130504608154,
"learning_rate": 4.075219608954278e-05,
"loss": 0.3917,
"num_input_tokens_seen": 1187312,
"step": 60
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.36925989389419556,
"learning_rate": 3.927189507131938e-05,
"loss": 0.3879,
"num_input_tokens_seen": 1284656,
"step": 65
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.3496692478656769,
"learning_rate": 3.7713277966230514e-05,
"loss": 0.3828,
"num_input_tokens_seen": 1380720,
"step": 70
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.3336363434791565,
"learning_rate": 3.608489758327472e-05,
"loss": 0.3897,
"num_input_tokens_seen": 1483248,
"step": 75
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.3519847095012665,
"learning_rate": 3.4395689552855955e-05,
"loss": 0.368,
"num_input_tokens_seen": 1581216,
"step": 80
},
{
"epoch": 0.8,
"grad_norm": 0.41548871994018555,
"learning_rate": 3.265492329309867e-05,
"loss": 0.367,
"num_input_tokens_seen": 1681520,
"step": 85
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.4266488552093506,
"learning_rate": 3.0872151144524595e-05,
"loss": 0.3691,
"num_input_tokens_seen": 1774368,
"step": 90
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.3932451903820038,
"learning_rate": 2.9057155952211502e-05,
"loss": 0.3611,
"num_input_tokens_seen": 1869344,
"step": 95
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.3845691382884979,
"learning_rate": 2.7219897383073373e-05,
"loss": 0.3534,
"num_input_tokens_seen": 1967664,
"step": 100
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.391476571559906,
"learning_rate": 2.537045727284232e-05,
"loss": 0.3326,
"num_input_tokens_seen": 2063040,
"step": 105
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.43011003732681274,
"learning_rate": 2.3518984302657146e-05,
"loss": 0.3415,
"num_input_tokens_seen": 2164608,
"step": 110
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.4368140995502472,
"learning_rate": 2.1675638308842145e-05,
"loss": 0.3523,
"num_input_tokens_seen": 2265696,
"step": 115
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.42045897245407104,
"learning_rate": 1.9850534531472546e-05,
"loss": 0.338,
"num_input_tokens_seen": 2365488,
"step": 120
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.4001467525959015,
"learning_rate": 1.8053688107658908e-05,
"loss": 0.3509,
"num_input_tokens_seen": 2464208,
"step": 125
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.40329447388648987,
"learning_rate": 1.6294959114140034e-05,
"loss": 0.3317,
"num_input_tokens_seen": 2563424,
"step": 130
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.4356438219547272,
"learning_rate": 1.4583998460759424e-05,
"loss": 0.3271,
"num_input_tokens_seen": 2660752,
"step": 135
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.43610551953315735,
"learning_rate": 1.2930194931731382e-05,
"loss": 0.3399,
"num_input_tokens_seen": 2757168,
"step": 140
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.44853097200393677,
"learning_rate": 1.1342623665304209e-05,
"loss": 0.3558,
"num_input_tokens_seen": 2858336,
"step": 145
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.3944084942340851,
"learning_rate": 9.829996354535172e-06,
"loss": 0.3366,
"num_input_tokens_seen": 2955472,
"step": 150
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.40302279591560364,
"learning_rate": 8.400613442446948e-06,
"loss": 0.346,
"num_input_tokens_seen": 3054528,
"step": 155
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.4172155559062958,
"learning_rate": 7.062318573891716e-06,
"loss": 0.3442,
"num_input_tokens_seen": 3152064,
"step": 160
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.42591097950935364,
"learning_rate": 5.822455554065217e-06,
"loss": 0.3475,
"num_input_tokens_seen": 3252064,
"step": 165
},
{
"epoch": 1.6,
"grad_norm": 0.4681831896305084,
"learning_rate": 4.687828049857967e-06,
"loss": 0.322,
"num_input_tokens_seen": 3346512,
"step": 170
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.42528390884399414,
"learning_rate": 3.6646622551801345e-06,
"loss": 0.3108,
"num_input_tokens_seen": 3445360,
"step": 175
},
{
"epoch": 1.6941176470588235,
"grad_norm": 0.4077628254890442,
"learning_rate": 2.75857272513132e-06,
"loss": 0.3432,
"num_input_tokens_seen": 3544432,
"step": 180
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.4058278501033783,
"learning_rate": 1.9745315664982276e-06,
"loss": 0.3236,
"num_input_tokens_seen": 3641504,
"step": 185
},
{
"epoch": 1.788235294117647,
"grad_norm": 0.3715575635433197,
"learning_rate": 1.3168411536452152e-06,
"loss": 0.3169,
"num_input_tokens_seen": 3739504,
"step": 190
},
{
"epoch": 1.835294117647059,
"grad_norm": 0.4317086935043335,
"learning_rate": 7.891105195175358e-07,
"loss": 0.3325,
"num_input_tokens_seen": 3841712,
"step": 195
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.3929608464241028,
"learning_rate": 3.9423555131007925e-07,
"loss": 0.3355,
"num_input_tokens_seen": 3940320,
"step": 200
},
{
"epoch": 1.9294117647058824,
"grad_norm": 0.4199908673763275,
"learning_rate": 1.343830994765982e-07,
"loss": 0.32,
"num_input_tokens_seen": 4037264,
"step": 205
},
{
"epoch": 1.9764705882352942,
"grad_norm": 0.43150097131729126,
"learning_rate": 1.0979087280141298e-08,
"loss": 0.3349,
"num_input_tokens_seen": 4133360,
"step": 210
},
{
"epoch": 1.9952941176470587,
"num_input_tokens_seen": 4172960,
"step": 212,
"total_flos": 1.8843158217490432e+17,
"train_loss": 0.39808354709508287,
"train_runtime": 2416.3268,
"train_samples_per_second": 2.814,
"train_steps_per_second": 0.088
}
],
"logging_steps": 5,
"max_steps": 212,
"num_input_tokens_seen": 4172960,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.8843158217490432e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}