{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 5000,
"global_step": 1125,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008888888888888889,
"grad_norm": 0.392131507396698,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.3156,
"step": 10
},
{
"epoch": 0.017777777777777778,
"grad_norm": 0.6833685636520386,
"learning_rate": 4.000000000000001e-06,
"loss": 2.5258,
"step": 20
},
{
"epoch": 0.02666666666666667,
"grad_norm": 0.4975835978984833,
"learning_rate": 6e-06,
"loss": 2.5274,
"step": 30
},
{
"epoch": 0.035555555555555556,
"grad_norm": 0.47904446721076965,
"learning_rate": 8.000000000000001e-06,
"loss": 2.2199,
"step": 40
},
{
"epoch": 0.044444444444444446,
"grad_norm": 0.5792201161384583,
"learning_rate": 1e-05,
"loss": 2.3423,
"step": 50
},
{
"epoch": 0.05333333333333334,
"grad_norm": 0.48581650853157043,
"learning_rate": 1.2e-05,
"loss": 2.3571,
"step": 60
},
{
"epoch": 0.06222222222222222,
"grad_norm": 0.4770135283470154,
"learning_rate": 1.4000000000000001e-05,
"loss": 2.4415,
"step": 70
},
{
"epoch": 0.07111111111111111,
"grad_norm": 0.6430277824401855,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.5717,
"step": 80
},
{
"epoch": 0.08,
"grad_norm": 0.5969924926757812,
"learning_rate": 1.8e-05,
"loss": 2.2712,
"step": 90
},
{
"epoch": 0.08888888888888889,
"grad_norm": 0.5458263754844666,
"learning_rate": 2e-05,
"loss": 2.1937,
"step": 100
},
{
"epoch": 0.09777777777777778,
"grad_norm": 0.49687811732292175,
"learning_rate": 2.2000000000000003e-05,
"loss": 2.3534,
"step": 110
},
{
"epoch": 0.10666666666666667,
"grad_norm": 0.5264276266098022,
"learning_rate": 2.4e-05,
"loss": 2.3235,
"step": 120
},
{
"epoch": 0.11555555555555555,
"grad_norm": 0.6222357749938965,
"learning_rate": 2.6000000000000002e-05,
"loss": 2.484,
"step": 130
},
{
"epoch": 0.12444444444444444,
"grad_norm": 0.5594074130058289,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.3924,
"step": 140
},
{
"epoch": 0.13333333333333333,
"grad_norm": 0.5403823852539062,
"learning_rate": 3e-05,
"loss": 2.043,
"step": 150
},
{
"epoch": 0.14222222222222222,
"grad_norm": 0.8265597820281982,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.4271,
"step": 160
},
{
"epoch": 0.1511111111111111,
"grad_norm": 0.4910406172275543,
"learning_rate": 3.4000000000000007e-05,
"loss": 2.2711,
"step": 170
},
{
"epoch": 0.16,
"grad_norm": 0.6300191283226013,
"learning_rate": 3.6e-05,
"loss": 2.4088,
"step": 180
},
{
"epoch": 0.1688888888888889,
"grad_norm": 0.9516507387161255,
"learning_rate": 3.8e-05,
"loss": 2.2007,
"step": 190
},
{
"epoch": 0.17777777777777778,
"grad_norm": 0.6081224679946899,
"learning_rate": 4e-05,
"loss": 2.2574,
"step": 200
},
{
"epoch": 0.18666666666666668,
"grad_norm": 0.670280396938324,
"learning_rate": 4.2e-05,
"loss": 2.1538,
"step": 210
},
{
"epoch": 0.19555555555555557,
"grad_norm": 0.5967269539833069,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.3559,
"step": 220
},
{
"epoch": 0.20444444444444446,
"grad_norm": 0.6861779093742371,
"learning_rate": 4.600000000000001e-05,
"loss": 2.2649,
"step": 230
},
{
"epoch": 0.21333333333333335,
"grad_norm": 0.7942169308662415,
"learning_rate": 4.8e-05,
"loss": 2.4453,
"step": 240
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.686896562576294,
"learning_rate": 5e-05,
"loss": 2.272,
"step": 250
},
{
"epoch": 0.2311111111111111,
"grad_norm": 0.6957288980484009,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.2378,
"step": 260
},
{
"epoch": 0.24,
"grad_norm": 0.5659416317939758,
"learning_rate": 5.4000000000000005e-05,
"loss": 2.2169,
"step": 270
},
{
"epoch": 0.24888888888888888,
"grad_norm": 0.6357027292251587,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.3081,
"step": 280
},
{
"epoch": 0.2577777777777778,
"grad_norm": 0.561445951461792,
"learning_rate": 5.8e-05,
"loss": 2.3665,
"step": 290
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.8563527464866638,
"learning_rate": 6e-05,
"loss": 2.2106,
"step": 300
},
{
"epoch": 0.27555555555555555,
"grad_norm": 0.7866883873939514,
"learning_rate": 6.2e-05,
"loss": 2.0944,
"step": 310
},
{
"epoch": 0.28444444444444444,
"grad_norm": 0.9338068962097168,
"learning_rate": 6.400000000000001e-05,
"loss": 2.2693,
"step": 320
},
{
"epoch": 0.29333333333333333,
"grad_norm": 0.7246842384338379,
"learning_rate": 6.6e-05,
"loss": 2.2881,
"step": 330
},
{
"epoch": 0.3022222222222222,
"grad_norm": 1.0052026510238647,
"learning_rate": 6.800000000000001e-05,
"loss": 2.3067,
"step": 340
},
{
"epoch": 0.3111111111111111,
"grad_norm": 0.6183302998542786,
"learning_rate": 7e-05,
"loss": 2.3916,
"step": 350
},
{
"epoch": 0.32,
"grad_norm": 0.7271236777305603,
"learning_rate": 7.2e-05,
"loss": 2.4863,
"step": 360
},
{
"epoch": 0.3288888888888889,
"grad_norm": 0.8021159768104553,
"learning_rate": 7.4e-05,
"loss": 2.5189,
"step": 370
},
{
"epoch": 0.3377777777777778,
"grad_norm": 0.670994758605957,
"learning_rate": 7.6e-05,
"loss": 2.299,
"step": 380
},
{
"epoch": 0.3466666666666667,
"grad_norm": 0.6826931238174438,
"learning_rate": 7.800000000000001e-05,
"loss": 2.2439,
"step": 390
},
{
"epoch": 0.35555555555555557,
"grad_norm": 0.6512673497200012,
"learning_rate": 8e-05,
"loss": 2.2511,
"step": 400
},
{
"epoch": 0.36444444444444446,
"grad_norm": 0.6105847358703613,
"learning_rate": 8.2e-05,
"loss": 2.2597,
"step": 410
},
{
"epoch": 0.37333333333333335,
"grad_norm": 0.7471911907196045,
"learning_rate": 8.4e-05,
"loss": 2.3652,
"step": 420
},
{
"epoch": 0.38222222222222224,
"grad_norm": 0.6970444321632385,
"learning_rate": 8.6e-05,
"loss": 2.3259,
"step": 430
},
{
"epoch": 0.39111111111111113,
"grad_norm": 0.6674960255622864,
"learning_rate": 8.800000000000001e-05,
"loss": 2.3558,
"step": 440
},
{
"epoch": 0.4,
"grad_norm": 0.7472724318504333,
"learning_rate": 9e-05,
"loss": 2.2086,
"step": 450
},
{
"epoch": 0.4088888888888889,
"grad_norm": 0.7264606356620789,
"learning_rate": 9.200000000000001e-05,
"loss": 2.084,
"step": 460
},
{
"epoch": 0.4177777777777778,
"grad_norm": 0.6516128182411194,
"learning_rate": 9.4e-05,
"loss": 2.0929,
"step": 470
},
{
"epoch": 0.4266666666666667,
"grad_norm": 0.8081138134002686,
"learning_rate": 9.6e-05,
"loss": 2.1653,
"step": 480
},
{
"epoch": 0.43555555555555553,
"grad_norm": 0.8400042653083801,
"learning_rate": 9.8e-05,
"loss": 2.3939,
"step": 490
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.4789764881134033,
"learning_rate": 0.0001,
"loss": 2.2763,
"step": 500
},
{
"epoch": 0.4533333333333333,
"grad_norm": 0.7314158082008362,
"learning_rate": 9.84e-05,
"loss": 2.3742,
"step": 510
},
{
"epoch": 0.4622222222222222,
"grad_norm": 0.5646970272064209,
"learning_rate": 9.680000000000001e-05,
"loss": 2.1478,
"step": 520
},
{
"epoch": 0.4711111111111111,
"grad_norm": 0.4644782841205597,
"learning_rate": 9.52e-05,
"loss": 2.2953,
"step": 530
},
{
"epoch": 0.48,
"grad_norm": 0.6332143545150757,
"learning_rate": 9.360000000000001e-05,
"loss": 2.2399,
"step": 540
},
{
"epoch": 0.4888888888888889,
"grad_norm": 0.706084668636322,
"learning_rate": 9.200000000000001e-05,
"loss": 2.2324,
"step": 550
},
{
"epoch": 0.49777777777777776,
"grad_norm": 0.6272305250167847,
"learning_rate": 9.04e-05,
"loss": 2.2321,
"step": 560
},
{
"epoch": 0.5066666666666667,
"grad_norm": 0.5707433223724365,
"learning_rate": 8.88e-05,
"loss": 2.2092,
"step": 570
},
{
"epoch": 0.5155555555555555,
"grad_norm": 0.660696268081665,
"learning_rate": 8.72e-05,
"loss": 1.9807,
"step": 580
},
{
"epoch": 0.5244444444444445,
"grad_norm": 0.5993101000785828,
"learning_rate": 8.560000000000001e-05,
"loss": 1.9566,
"step": 590
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.5725361704826355,
"learning_rate": 8.4e-05,
"loss": 2.1722,
"step": 600
},
{
"epoch": 0.5422222222222223,
"grad_norm": 0.651642918586731,
"learning_rate": 8.24e-05,
"loss": 2.1262,
"step": 610
},
{
"epoch": 0.5511111111111111,
"grad_norm": 0.5352093577384949,
"learning_rate": 8.080000000000001e-05,
"loss": 2.1979,
"step": 620
},
{
"epoch": 0.56,
"grad_norm": 0.5744340419769287,
"learning_rate": 7.920000000000001e-05,
"loss": 2.0399,
"step": 630
},
{
"epoch": 0.5688888888888889,
"grad_norm": 0.6103332042694092,
"learning_rate": 7.76e-05,
"loss": 2.2209,
"step": 640
},
{
"epoch": 0.5777777777777777,
"grad_norm": 0.6434329152107239,
"learning_rate": 7.6e-05,
"loss": 2.379,
"step": 650
},
{
"epoch": 0.5866666666666667,
"grad_norm": 0.5661439299583435,
"learning_rate": 7.44e-05,
"loss": 2.1698,
"step": 660
},
{
"epoch": 0.5955555555555555,
"grad_norm": 0.5890876054763794,
"learning_rate": 7.280000000000001e-05,
"loss": 2.058,
"step": 670
},
{
"epoch": 0.6044444444444445,
"grad_norm": 0.4725685715675354,
"learning_rate": 7.12e-05,
"loss": 2.0658,
"step": 680
},
{
"epoch": 0.6133333333333333,
"grad_norm": 0.6357767581939697,
"learning_rate": 6.96e-05,
"loss": 2.1107,
"step": 690
},
{
"epoch": 0.6222222222222222,
"grad_norm": 0.6726393699645996,
"learning_rate": 6.800000000000001e-05,
"loss": 2.1904,
"step": 700
},
{
"epoch": 0.6311111111111111,
"grad_norm": 0.5009137392044067,
"learning_rate": 6.64e-05,
"loss": 2.0765,
"step": 710
},
{
"epoch": 0.64,
"grad_norm": 0.7313541173934937,
"learning_rate": 6.48e-05,
"loss": 2.3566,
"step": 720
},
{
"epoch": 0.6488888888888888,
"grad_norm": 0.7757258415222168,
"learning_rate": 6.32e-05,
"loss": 2.2077,
"step": 730
},
{
"epoch": 0.6577777777777778,
"grad_norm": 0.5176954865455627,
"learning_rate": 6.16e-05,
"loss": 2.1287,
"step": 740
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.6251640915870667,
"learning_rate": 6e-05,
"loss": 2.1726,
"step": 750
},
{
"epoch": 0.6755555555555556,
"grad_norm": 0.5190748572349548,
"learning_rate": 5.8399999999999997e-05,
"loss": 2.1528,
"step": 760
},
{
"epoch": 0.6844444444444444,
"grad_norm": 0.6299232840538025,
"learning_rate": 5.68e-05,
"loss": 1.9734,
"step": 770
},
{
"epoch": 0.6933333333333334,
"grad_norm": 0.6707553863525391,
"learning_rate": 5.520000000000001e-05,
"loss": 1.9959,
"step": 780
},
{
"epoch": 0.7022222222222222,
"grad_norm": 0.5231025815010071,
"learning_rate": 5.360000000000001e-05,
"loss": 2.0261,
"step": 790
},
{
"epoch": 0.7111111111111111,
"grad_norm": 0.489175021648407,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.1987,
"step": 800
},
{
"epoch": 0.72,
"grad_norm": 0.6425113677978516,
"learning_rate": 5.0400000000000005e-05,
"loss": 2.0759,
"step": 810
},
{
"epoch": 0.7288888888888889,
"grad_norm": 0.6148718595504761,
"learning_rate": 4.88e-05,
"loss": 2.158,
"step": 820
},
{
"epoch": 0.7377777777777778,
"grad_norm": 0.7482603192329407,
"learning_rate": 4.72e-05,
"loss": 1.9579,
"step": 830
},
{
"epoch": 0.7466666666666667,
"grad_norm": 0.5650503039360046,
"learning_rate": 4.5600000000000004e-05,
"loss": 2.1253,
"step": 840
},
{
"epoch": 0.7555555555555555,
"grad_norm": 0.6965321898460388,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.1034,
"step": 850
},
{
"epoch": 0.7644444444444445,
"grad_norm": 0.5848603844642639,
"learning_rate": 4.24e-05,
"loss": 2.0139,
"step": 860
},
{
"epoch": 0.7733333333333333,
"grad_norm": 0.566116988658905,
"learning_rate": 4.08e-05,
"loss": 2.0551,
"step": 870
},
{
"epoch": 0.7822222222222223,
"grad_norm": 0.5044224858283997,
"learning_rate": 3.9200000000000004e-05,
"loss": 1.9789,
"step": 880
},
{
"epoch": 0.7911111111111111,
"grad_norm": 0.566875159740448,
"learning_rate": 3.76e-05,
"loss": 2.3283,
"step": 890
},
{
"epoch": 0.8,
"grad_norm": 0.5149425268173218,
"learning_rate": 3.6e-05,
"loss": 1.9651,
"step": 900
},
{
"epoch": 0.8088888888888889,
"grad_norm": 0.5834816694259644,
"learning_rate": 3.4399999999999996e-05,
"loss": 1.9825,
"step": 910
},
{
"epoch": 0.8177777777777778,
"grad_norm": 0.7403817772865295,
"learning_rate": 3.2800000000000004e-05,
"loss": 2.0542,
"step": 920
},
{
"epoch": 0.8266666666666667,
"grad_norm": 0.6044632792472839,
"learning_rate": 3.12e-05,
"loss": 2.2774,
"step": 930
},
{
"epoch": 0.8355555555555556,
"grad_norm": 0.6258851885795593,
"learning_rate": 2.96e-05,
"loss": 2.1449,
"step": 940
},
{
"epoch": 0.8444444444444444,
"grad_norm": 0.6421411037445068,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.0822,
"step": 950
},
{
"epoch": 0.8533333333333334,
"grad_norm": 0.34951749444007874,
"learning_rate": 2.64e-05,
"loss": 1.9521,
"step": 960
},
{
"epoch": 0.8622222222222222,
"grad_norm": 0.5875269770622253,
"learning_rate": 2.48e-05,
"loss": 2.0813,
"step": 970
},
{
"epoch": 0.8711111111111111,
"grad_norm": 0.5003840327262878,
"learning_rate": 2.32e-05,
"loss": 1.9155,
"step": 980
},
{
"epoch": 0.88,
"grad_norm": 0.4974331259727478,
"learning_rate": 2.16e-05,
"loss": 2.0593,
"step": 990
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.505962610244751,
"learning_rate": 2e-05,
"loss": 2.0306,
"step": 1000
},
{
"epoch": 0.8977777777777778,
"grad_norm": 0.5298508405685425,
"learning_rate": 1.84e-05,
"loss": 2.136,
"step": 1010
},
{
"epoch": 0.9066666666666666,
"grad_norm": 0.4652698338031769,
"learning_rate": 1.6800000000000002e-05,
"loss": 1.9021,
"step": 1020
},
{
"epoch": 0.9155555555555556,
"grad_norm": 0.5339078307151794,
"learning_rate": 1.52e-05,
"loss": 2.0262,
"step": 1030
},
{
"epoch": 0.9244444444444444,
"grad_norm": 0.5623987317085266,
"learning_rate": 1.3600000000000002e-05,
"loss": 1.8597,
"step": 1040
},
{
"epoch": 0.9333333333333333,
"grad_norm": 0.689896285533905,
"learning_rate": 1.2e-05,
"loss": 2.1377,
"step": 1050
},
{
"epoch": 0.9422222222222222,
"grad_norm": 0.460510790348053,
"learning_rate": 1.04e-05,
"loss": 1.889,
"step": 1060
},
{
"epoch": 0.9511111111111111,
"grad_norm": 0.5239367485046387,
"learning_rate": 8.8e-06,
"loss": 2.1753,
"step": 1070
},
{
"epoch": 0.96,
"grad_norm": 0.5576533079147339,
"learning_rate": 7.2e-06,
"loss": 2.1375,
"step": 1080
},
{
"epoch": 0.9688888888888889,
"grad_norm": 0.5585991144180298,
"learning_rate": 5.600000000000001e-06,
"loss": 1.9434,
"step": 1090
},
{
"epoch": 0.9777777777777777,
"grad_norm": 0.5961203575134277,
"learning_rate": 4.000000000000001e-06,
"loss": 2.0194,
"step": 1100
},
{
"epoch": 0.9866666666666667,
"grad_norm": 0.6022922396659851,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.9679,
"step": 1110
},
{
"epoch": 0.9955555555555555,
"grad_norm": 0.4506663382053375,
"learning_rate": 8.000000000000001e-07,
"loss": 2.1243,
"step": 1120
}
],
"logging_steps": 10,
"max_steps": 1125,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.4276096434176e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}