lora-color-tiny / checkpoint-147 / trainer_state.json
nassersala · Upload folder using huggingface_hub · 9e1f213 · verified
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9591836734693877,
"eval_steps": 13,
"global_step": 147,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02040816326530612,
"grad_norm": 0.7881951332092285,
"learning_rate": 2e-05,
"loss": 2.7509,
"step": 1
},
{
"epoch": 0.02040816326530612,
"eval_loss": 2.6902382373809814,
"eval_runtime": 269.5606,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 3.146,
"step": 1
},
{
"epoch": 0.04081632653061224,
"grad_norm": 0.789082407951355,
"learning_rate": 4e-05,
"loss": 2.7449,
"step": 2
},
{
"epoch": 0.061224489795918366,
"grad_norm": 0.7354114055633545,
"learning_rate": 6e-05,
"loss": 2.7164,
"step": 3
},
{
"epoch": 0.08163265306122448,
"grad_norm": 0.7292255759239197,
"learning_rate": 8e-05,
"loss": 2.7174,
"step": 4
},
{
"epoch": 0.10204081632653061,
"grad_norm": 0.6898028254508972,
"learning_rate": 0.0001,
"loss": 2.6891,
"step": 5
},
{
"epoch": 0.12244897959183673,
"grad_norm": 0.6861400604248047,
"learning_rate": 0.00012,
"loss": 2.6545,
"step": 6
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.7510350346565247,
"learning_rate": 0.00014,
"loss": 2.5656,
"step": 7
},
{
"epoch": 0.16326530612244897,
"grad_norm": 0.8011165261268616,
"learning_rate": 0.00016,
"loss": 2.4519,
"step": 8
},
{
"epoch": 0.1836734693877551,
"grad_norm": 0.8624005317687988,
"learning_rate": 0.00018,
"loss": 2.3178,
"step": 9
},
{
"epoch": 0.20408163265306123,
"grad_norm": 0.8004987835884094,
"learning_rate": 0.0002,
"loss": 2.1783,
"step": 10
},
{
"epoch": 0.22448979591836735,
"grad_norm": 0.6362400054931641,
"learning_rate": 0.000199985736255971,
"loss": 2.0252,
"step": 11
},
{
"epoch": 0.24489795918367346,
"grad_norm": 0.7930936217308044,
"learning_rate": 0.0001999429490929718,
"loss": 1.8839,
"step": 12
},
{
"epoch": 0.2653061224489796,
"grad_norm": 0.5149843096733093,
"learning_rate": 0.00019987165071710527,
"loss": 1.8064,
"step": 13
},
{
"epoch": 0.2653061224489796,
"eval_loss": 1.6734941005706787,
"eval_runtime": 271.2615,
"eval_samples_per_second": 6.249,
"eval_steps_per_second": 3.126,
"step": 13
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.42121434211730957,
"learning_rate": 0.00019977186146800707,
"loss": 1.7922,
"step": 14
},
{
"epoch": 0.30612244897959184,
"grad_norm": 0.3523242771625519,
"learning_rate": 0.0001996436098130433,
"loss": 1.7711,
"step": 15
},
{
"epoch": 0.32653061224489793,
"grad_norm": 0.3384595215320587,
"learning_rate": 0.00019948693233918952,
"loss": 1.7152,
"step": 16
},
{
"epoch": 0.3469387755102041,
"grad_norm": 0.34942421317100525,
"learning_rate": 0.00019930187374259337,
"loss": 1.7112,
"step": 17
},
{
"epoch": 0.3673469387755102,
"grad_norm": 0.31712639331817627,
"learning_rate": 0.00019908848681582391,
"loss": 1.7059,
"step": 18
},
{
"epoch": 0.3877551020408163,
"grad_norm": 0.2875436842441559,
"learning_rate": 0.00019884683243281116,
"loss": 1.6468,
"step": 19
},
{
"epoch": 0.40816326530612246,
"grad_norm": 0.24433130025863647,
"learning_rate": 0.00019857697953148037,
"loss": 1.6408,
"step": 20
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.21414674818515778,
"learning_rate": 0.00019827900509408581,
"loss": 1.616,
"step": 21
},
{
"epoch": 0.4489795918367347,
"grad_norm": 0.21537622809410095,
"learning_rate": 0.00019795299412524945,
"loss": 1.609,
"step": 22
},
{
"epoch": 0.46938775510204084,
"grad_norm": 0.2432074397802353,
"learning_rate": 0.00019759903962771156,
"loss": 1.6066,
"step": 23
},
{
"epoch": 0.4897959183673469,
"grad_norm": 0.2359839379787445,
"learning_rate": 0.00019721724257579907,
"loss": 1.5851,
"step": 24
},
{
"epoch": 0.5102040816326531,
"grad_norm": 0.22065888345241547,
"learning_rate": 0.00019680771188662044,
"loss": 1.5739,
"step": 25
},
{
"epoch": 0.5306122448979592,
"grad_norm": 0.20339132845401764,
"learning_rate": 0.0001963705643889941,
"loss": 1.5513,
"step": 26
},
{
"epoch": 0.5306122448979592,
"eval_loss": 1.4832030534744263,
"eval_runtime": 271.2449,
"eval_samples_per_second": 6.249,
"eval_steps_per_second": 3.126,
"step": 26
},
{
"epoch": 0.5510204081632653,
"grad_norm": 0.18875224888324738,
"learning_rate": 0.00019590592479012023,
"loss": 1.5378,
"step": 27
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.18564417958259583,
"learning_rate": 0.00019541392564000488,
"loss": 1.5212,
"step": 28
},
{
"epoch": 0.5918367346938775,
"grad_norm": 0.16226942837238312,
"learning_rate": 0.00019489470729364692,
"loss": 1.5391,
"step": 29
},
{
"epoch": 0.6122448979591837,
"grad_norm": 0.15650039911270142,
"learning_rate": 0.00019434841787099803,
"loss": 1.511,
"step": 30
},
{
"epoch": 0.6326530612244898,
"grad_norm": 0.15976540744304657,
"learning_rate": 0.00019377521321470805,
"loss": 1.5119,
"step": 31
},
{
"epoch": 0.6530612244897959,
"grad_norm": 0.16409288346767426,
"learning_rate": 0.00019317525684566685,
"loss": 1.4909,
"step": 32
},
{
"epoch": 0.673469387755102,
"grad_norm": 0.15468019247055054,
"learning_rate": 0.00019254871991635598,
"loss": 1.4951,
"step": 33
},
{
"epoch": 0.6938775510204082,
"grad_norm": 0.1462036371231079,
"learning_rate": 0.00019189578116202307,
"loss": 1.4643,
"step": 34
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.1541963368654251,
"learning_rate": 0.00019121662684969335,
"loss": 1.5159,
"step": 35
},
{
"epoch": 0.7346938775510204,
"grad_norm": 0.14798064529895782,
"learning_rate": 0.00019051145072503215,
"loss": 1.4741,
"step": 36
},
{
"epoch": 0.7551020408163265,
"grad_norm": 0.13914817571640015,
"learning_rate": 0.00018978045395707418,
"loss": 1.4788,
"step": 37
},
{
"epoch": 0.7755102040816326,
"grad_norm": 0.15608824789524078,
"learning_rate": 0.00018902384508083517,
"loss": 1.4687,
"step": 38
},
{
"epoch": 0.7959183673469388,
"grad_norm": 0.14460116624832153,
"learning_rate": 0.00018824183993782192,
"loss": 1.482,
"step": 39
},
{
"epoch": 0.7959183673469388,
"eval_loss": 1.411073088645935,
"eval_runtime": 271.292,
"eval_samples_per_second": 6.248,
"eval_steps_per_second": 3.126,
"step": 39
},
{
"epoch": 0.8163265306122449,
"grad_norm": 0.15740551054477692,
"learning_rate": 0.00018743466161445823,
"loss": 1.4486,
"step": 40
},
{
"epoch": 0.8367346938775511,
"grad_norm": 0.14149661362171173,
"learning_rate": 0.00018660254037844388,
"loss": 1.4353,
"step": 41
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.14034292101860046,
"learning_rate": 0.0001857457136130651,
"loss": 1.4523,
"step": 42
},
{
"epoch": 0.8775510204081632,
"grad_norm": 0.1487722396850586,
"learning_rate": 0.00018486442574947511,
"loss": 1.4095,
"step": 43
},
{
"epoch": 0.8979591836734694,
"grad_norm": 0.17400234937667847,
"learning_rate": 0.00018395892819696389,
"loss": 1.4414,
"step": 44
},
{
"epoch": 0.9183673469387755,
"grad_norm": 0.1741325408220291,
"learning_rate": 0.00018302947927123766,
"loss": 1.4379,
"step": 45
},
{
"epoch": 0.9387755102040817,
"grad_norm": 0.15319454669952393,
"learning_rate": 0.00018207634412072764,
"loss": 1.405,
"step": 46
},
{
"epoch": 0.9591836734693877,
"grad_norm": 0.15876264870166779,
"learning_rate": 0.00018109979465095013,
"loss": 1.4122,
"step": 47
},
{
"epoch": 0.9795918367346939,
"grad_norm": 0.17120805382728577,
"learning_rate": 0.00018010010944693848,
"loss": 1.4132,
"step": 48
},
{
"epoch": 1.0,
"grad_norm": 0.1436116099357605,
"learning_rate": 0.00017907757369376985,
"loss": 1.416,
"step": 49
},
{
"epoch": 1.0204081632653061,
"grad_norm": 0.1707429438829422,
"learning_rate": 0.0001780324790952092,
"loss": 1.3913,
"step": 50
},
{
"epoch": 1.0204081632653061,
"grad_norm": 0.17117524147033691,
"learning_rate": 0.00017696512379049325,
"loss": 1.3963,
"step": 51
},
{
"epoch": 1.0408163265306123,
"grad_norm": 0.13410089910030365,
"learning_rate": 0.0001758758122692791,
"loss": 1.392,
"step": 52
},
{
"epoch": 1.0408163265306123,
"eval_loss": 1.3676769733428955,
"eval_runtime": 270.8566,
"eval_samples_per_second": 6.258,
"eval_steps_per_second": 3.131,
"step": 52
},
{
"epoch": 1.0612244897959184,
"grad_norm": 0.18877607583999634,
"learning_rate": 0.00017476485528478093,
"loss": 1.3854,
"step": 53
},
{
"epoch": 1.0816326530612246,
"grad_norm": 0.1752927452325821,
"learning_rate": 0.00017363256976511972,
"loss": 1.3759,
"step": 54
},
{
"epoch": 1.1020408163265305,
"grad_norm": 0.17180170118808746,
"learning_rate": 0.000172479278722912,
"loss": 1.3614,
"step": 55
},
{
"epoch": 1.1224489795918366,
"grad_norm": 0.1640290915966034,
"learning_rate": 0.00017130531116312203,
"loss": 1.3853,
"step": 56
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.2047068476676941,
"learning_rate": 0.0001701110019892053,
"loss": 1.3699,
"step": 57
},
{
"epoch": 1.163265306122449,
"grad_norm": 0.1835869997739792,
"learning_rate": 0.00016889669190756868,
"loss": 1.3403,
"step": 58
},
{
"epoch": 1.183673469387755,
"grad_norm": 0.16733241081237793,
"learning_rate": 0.00016766272733037576,
"loss": 1.3609,
"step": 59
},
{
"epoch": 1.2040816326530612,
"grad_norm": 0.178726926445961,
"learning_rate": 0.00016640946027672392,
"loss": 1.3651,
"step": 60
},
{
"epoch": 1.2244897959183674,
"grad_norm": 0.16719630360603333,
"learning_rate": 0.00016513724827222227,
"loss": 1.3676,
"step": 61
},
{
"epoch": 1.2448979591836735,
"grad_norm": 0.15999363362789154,
"learning_rate": 0.00016384645424699835,
"loss": 1.3651,
"step": 62
},
{
"epoch": 1.2653061224489797,
"grad_norm": 0.1705988198518753,
"learning_rate": 0.00016253744643216368,
"loss": 1.3757,
"step": 63
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.14996370673179626,
"learning_rate": 0.0001612105982547663,
"loss": 1.3474,
"step": 64
},
{
"epoch": 1.306122448979592,
"grad_norm": 0.19127260148525238,
"learning_rate": 0.0001598662882312615,
"loss": 1.3414,
"step": 65
},
{
"epoch": 1.306122448979592,
"eval_loss": 1.331880807876587,
"eval_runtime": 270.8424,
"eval_samples_per_second": 6.258,
"eval_steps_per_second": 3.131,
"step": 65
},
{
"epoch": 1.3265306122448979,
"grad_norm": 0.16125527024269104,
"learning_rate": 0.00015850489985953076,
"loss": 1.3509,
"step": 66
},
{
"epoch": 1.346938775510204,
"grad_norm": 0.1979473978281021,
"learning_rate": 0.00015712682150947923,
"loss": 1.3579,
"step": 67
},
{
"epoch": 1.3673469387755102,
"grad_norm": 0.18317992985248566,
"learning_rate": 0.00015573244631224365,
"loss": 1.3341,
"step": 68
},
{
"epoch": 1.3877551020408163,
"grad_norm": 0.1646898239850998,
"learning_rate": 0.0001543221720480419,
"loss": 1.3361,
"step": 69
},
{
"epoch": 1.4081632653061225,
"grad_norm": 0.1760271042585373,
"learning_rate": 0.00015289640103269625,
"loss": 1.358,
"step": 70
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.165283203125,
"learning_rate": 0.0001514555400028629,
"loss": 1.3072,
"step": 71
},
{
"epoch": 1.4489795918367347,
"grad_norm": 0.1507076472043991,
"learning_rate": 0.00015000000000000001,
"loss": 1.3133,
"step": 72
},
{
"epoch": 1.469387755102041,
"grad_norm": 0.16913647949695587,
"learning_rate": 0.00014853019625310813,
"loss": 1.3232,
"step": 73
},
{
"epoch": 1.489795918367347,
"grad_norm": 0.18266479671001434,
"learning_rate": 0.0001470465480602756,
"loss": 1.3512,
"step": 74
},
{
"epoch": 1.510204081632653,
"grad_norm": 0.19301828742027283,
"learning_rate": 0.0001455494786690634,
"loss": 1.3241,
"step": 75
},
{
"epoch": 1.5306122448979593,
"grad_norm": 0.16109652817249298,
"learning_rate": 0.00014403941515576344,
"loss": 1.3256,
"step": 76
},
{
"epoch": 1.5510204081632653,
"grad_norm": 0.17053867876529694,
"learning_rate": 0.00014251678830356408,
"loss": 1.3162,
"step": 77
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.17348544299602509,
"learning_rate": 0.00014098203247965875,
"loss": 1.3213,
"step": 78
},
{
"epoch": 1.5714285714285714,
"eval_loss": 1.3028697967529297,
"eval_runtime": 270.8095,
"eval_samples_per_second": 6.259,
"eval_steps_per_second": 3.131,
"step": 78
},
{
"epoch": 1.5918367346938775,
"grad_norm": 0.1703907549381256,
"learning_rate": 0.00013943558551133186,
"loss": 1.3073,
"step": 79
},
{
"epoch": 1.6122448979591837,
"grad_norm": 0.17313100397586823,
"learning_rate": 0.0001378778885610576,
"loss": 1.3232,
"step": 80
},
{
"epoch": 1.6326530612244898,
"grad_norm": 0.17237025499343872,
"learning_rate": 0.00013630938600064747,
"loss": 1.3406,
"step": 81
},
{
"epoch": 1.6530612244897958,
"grad_norm": 0.19658459722995758,
"learning_rate": 0.00013473052528448201,
"loss": 1.3114,
"step": 82
},
{
"epoch": 1.6734693877551021,
"grad_norm": 0.20599938929080963,
"learning_rate": 0.0001331417568218636,
"loss": 1.3288,
"step": 83
},
{
"epoch": 1.693877551020408,
"grad_norm": 0.17759399116039276,
"learning_rate": 0.00013154353384852558,
"loss": 1.2995,
"step": 84
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.18712250888347626,
"learning_rate": 0.00012993631229733582,
"loss": 1.2895,
"step": 85
},
{
"epoch": 1.7346938775510203,
"grad_norm": 0.1991330236196518,
"learning_rate": 0.00012832055066823038,
"loss": 1.2886,
"step": 86
},
{
"epoch": 1.7551020408163265,
"grad_norm": 0.22125203907489777,
"learning_rate": 0.00012669670989741517,
"loss": 1.3233,
"step": 87
},
{
"epoch": 1.7755102040816326,
"grad_norm": 0.2052813619375229,
"learning_rate": 0.00012506525322587207,
"loss": 1.3079,
"step": 88
},
{
"epoch": 1.7959183673469388,
"grad_norm": 0.19290736317634583,
"learning_rate": 0.00012342664606720822,
"loss": 1.3174,
"step": 89
},
{
"epoch": 1.816326530612245,
"grad_norm": 0.20912542939186096,
"learning_rate": 0.00012178135587488515,
"loss": 1.2915,
"step": 90
},
{
"epoch": 1.836734693877551,
"grad_norm": 0.20760588347911835,
"learning_rate": 0.00012012985200886602,
"loss": 1.3028,
"step": 91
},
{
"epoch": 1.836734693877551,
"eval_loss": 1.2795333862304688,
"eval_runtime": 270.6525,
"eval_samples_per_second": 6.263,
"eval_steps_per_second": 3.133,
"step": 91
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.1996900886297226,
"learning_rate": 0.00011847260560171896,
"loss": 1.3119,
"step": 92
},
{
"epoch": 1.8775510204081631,
"grad_norm": 0.23766876757144928,
"learning_rate": 0.00011681008942421483,
"loss": 1.2978,
"step": 93
},
{
"epoch": 1.8979591836734695,
"grad_norm": 0.19782397150993347,
"learning_rate": 0.00011514277775045768,
"loss": 1.2955,
"step": 94
},
{
"epoch": 1.9183673469387754,
"grad_norm": 0.22519494593143463,
"learning_rate": 0.00011347114622258612,
"loss": 1.2957,
"step": 95
},
{
"epoch": 1.9387755102040818,
"grad_norm": 0.2590245306491852,
"learning_rate": 0.00011179567171508463,
"loss": 1.2809,
"step": 96
},
{
"epoch": 1.9591836734693877,
"grad_norm": 0.2235420197248459,
"learning_rate": 0.00011011683219874323,
"loss": 1.2784,
"step": 97
},
{
"epoch": 1.9795918367346939,
"grad_norm": 0.285740464925766,
"learning_rate": 0.00010843510660430447,
"loss": 1.309,
"step": 98
},
{
"epoch": 2.0,
"grad_norm": 0.20554350316524506,
"learning_rate": 0.00010675097468583652,
"loss": 1.273,
"step": 99
},
{
"epoch": 2.020408163265306,
"grad_norm": 0.24468418955802917,
"learning_rate": 0.00010506491688387127,
"loss": 1.2833,
"step": 100
},
{
"epoch": 2.020408163265306,
"grad_norm": 0.21553528308868408,
"learning_rate": 0.00010337741418834684,
"loss": 1.2669,
"step": 101
},
{
"epoch": 2.0408163265306123,
"grad_norm": 0.22015659511089325,
"learning_rate": 0.0001016889480013931,
"loss": 1.2795,
"step": 102
},
{
"epoch": 2.061224489795918,
"grad_norm": 0.2028799206018448,
"learning_rate": 0.0001,
"loss": 1.2584,
"step": 103
},
{
"epoch": 2.0816326530612246,
"grad_norm": 0.23474323749542236,
"learning_rate": 9.83110519986069e-05,
"loss": 1.2761,
"step": 104
},
{
"epoch": 2.0816326530612246,
"eval_loss": 1.2696796655654907,
"eval_runtime": 270.6586,
"eval_samples_per_second": 6.263,
"eval_steps_per_second": 3.133,
"step": 104
},
{
"epoch": 2.1020408163265305,
"grad_norm": 0.21070216596126556,
"learning_rate": 9.662258581165319e-05,
"loss": 1.2808,
"step": 105
},
{
"epoch": 2.122448979591837,
"grad_norm": 0.21867221593856812,
"learning_rate": 9.493508311612874e-05,
"loss": 1.2873,
"step": 106
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.21630822122097015,
"learning_rate": 9.324902531416349e-05,
"loss": 1.2527,
"step": 107
},
{
"epoch": 2.163265306122449,
"grad_norm": 0.2134082019329071,
"learning_rate": 9.156489339569554e-05,
"loss": 1.2755,
"step": 108
},
{
"epoch": 2.183673469387755,
"grad_norm": 0.22310714423656464,
"learning_rate": 8.98831678012568e-05,
"loss": 1.2512,
"step": 109
},
{
"epoch": 2.204081632653061,
"grad_norm": 0.2365124374628067,
"learning_rate": 8.820432828491542e-05,
"loss": 1.2725,
"step": 110
},
{
"epoch": 2.2244897959183674,
"grad_norm": 0.2086496651172638,
"learning_rate": 8.652885377741393e-05,
"loss": 1.2488,
"step": 111
},
{
"epoch": 2.2448979591836733,
"grad_norm": 0.20848101377487183,
"learning_rate": 8.485722224954237e-05,
"loss": 1.2793,
"step": 112
},
{
"epoch": 2.2653061224489797,
"grad_norm": 0.20784686505794525,
"learning_rate": 8.31899105757852e-05,
"loss": 1.2564,
"step": 113
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.21896174550056458,
"learning_rate": 8.15273943982811e-05,
"loss": 1.2515,
"step": 114
},
{
"epoch": 2.306122448979592,
"grad_norm": 0.21367855370044708,
"learning_rate": 7.987014799113397e-05,
"loss": 1.248,
"step": 115
},
{
"epoch": 2.326530612244898,
"grad_norm": 0.20891636610031128,
"learning_rate": 7.821864412511485e-05,
"loss": 1.2753,
"step": 116
},
{
"epoch": 2.3469387755102042,
"grad_norm": 0.2092975378036499,
"learning_rate": 7.65733539327918e-05,
"loss": 1.2509,
"step": 117
},
{
"epoch": 2.3469387755102042,
"eval_loss": 1.258699655532837,
"eval_runtime": 270.5384,
"eval_samples_per_second": 6.265,
"eval_steps_per_second": 3.134,
"step": 117
},
{
"epoch": 2.36734693877551,
"grad_norm": 0.1905972808599472,
"learning_rate": 7.493474677412794e-05,
"loss": 1.2516,
"step": 118
},
{
"epoch": 2.387755102040816,
"grad_norm": 0.19716158509254456,
"learning_rate": 7.330329010258483e-05,
"loss": 1.2665,
"step": 119
},
{
"epoch": 2.4081632653061225,
"grad_norm": 0.1953389048576355,
"learning_rate": 7.16794493317696e-05,
"loss": 1.2661,
"step": 120
},
{
"epoch": 2.4285714285714284,
"grad_norm": 0.1990067958831787,
"learning_rate": 7.006368770266421e-05,
"loss": 1.2619,
"step": 121
},
{
"epoch": 2.4489795918367347,
"grad_norm": 0.1954919546842575,
"learning_rate": 6.845646615147445e-05,
"loss": 1.2736,
"step": 122
},
{
"epoch": 2.4693877551020407,
"grad_norm": 0.18382853269577026,
"learning_rate": 6.685824317813643e-05,
"loss": 1.2732,
"step": 123
},
{
"epoch": 2.489795918367347,
"grad_norm": 0.18729491531848907,
"learning_rate": 6.526947471551798e-05,
"loss": 1.2509,
"step": 124
},
{
"epoch": 2.510204081632653,
"grad_norm": 0.2034740000963211,
"learning_rate": 6.369061399935255e-05,
"loss": 1.2829,
"step": 125
},
{
"epoch": 2.5306122448979593,
"grad_norm": 0.1952620893716812,
"learning_rate": 6.21221114389424e-05,
"loss": 1.2689,
"step": 126
},
{
"epoch": 2.5510204081632653,
"grad_norm": 0.1986168622970581,
"learning_rate": 6.0564414488668165e-05,
"loss": 1.2644,
"step": 127
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.19526751339435577,
"learning_rate": 5.901796752034128e-05,
"loss": 1.265,
"step": 128
},
{
"epoch": 2.5918367346938775,
"grad_norm": 0.195367693901062,
"learning_rate": 5.748321169643596e-05,
"loss": 1.2782,
"step": 129
},
{
"epoch": 2.612244897959184,
"grad_norm": 0.18351928889751434,
"learning_rate": 5.596058484423656e-05,
"loss": 1.2884,
"step": 130
},
{
"epoch": 2.612244897959184,
"eval_loss": 1.2471545934677124,
"eval_runtime": 270.4953,
"eval_samples_per_second": 6.266,
"eval_steps_per_second": 3.135,
"step": 130
},
{
"epoch": 2.63265306122449,
"grad_norm": 0.2015760987997055,
"learning_rate": 5.44505213309366e-05,
"loss": 1.2536,
"step": 131
},
{
"epoch": 2.6530612244897958,
"grad_norm": 0.1734190732240677,
"learning_rate": 5.2953451939724454e-05,
"loss": 1.2628,
"step": 132
},
{
"epoch": 2.673469387755102,
"grad_norm": 0.214066281914711,
"learning_rate": 5.146980374689192e-05,
"loss": 1.2543,
"step": 133
},
{
"epoch": 2.693877551020408,
"grad_norm": 0.17507924139499664,
"learning_rate": 5.000000000000002e-05,
"loss": 1.2665,
"step": 134
},
{
"epoch": 2.7142857142857144,
"grad_norm": 0.1778109222650528,
"learning_rate": 4.854445999713715e-05,
"loss": 1.2789,
"step": 135
},
{
"epoch": 2.7346938775510203,
"grad_norm": 0.1856827288866043,
"learning_rate": 4.710359896730379e-05,
"loss": 1.2481,
"step": 136
},
{
"epoch": 2.7551020408163263,
"grad_norm": 0.17856694757938385,
"learning_rate": 4.567782795195816e-05,
"loss": 1.2732,
"step": 137
},
{
"epoch": 2.7755102040816326,
"grad_norm": 0.21598489582538605,
"learning_rate": 4.426755368775637e-05,
"loss": 1.2525,
"step": 138
},
{
"epoch": 2.795918367346939,
"grad_norm": 0.17308436334133148,
"learning_rate": 4.287317849052075e-05,
"loss": 1.2665,
"step": 139
},
{
"epoch": 2.816326530612245,
"grad_norm": 0.19207212328910828,
"learning_rate": 4.149510014046922e-05,
"loss": 1.2681,
"step": 140
},
{
"epoch": 2.836734693877551,
"grad_norm": 0.19626958668231964,
"learning_rate": 4.013371176873849e-05,
"loss": 1.2727,
"step": 141
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.1986483484506607,
"learning_rate": 3.878940174523371e-05,
"loss": 1.2414,
"step": 142
},
{
"epoch": 2.877551020408163,
"grad_norm": 0.19369089603424072,
"learning_rate": 3.746255356783632e-05,
"loss": 1.254,
"step": 143
},
{
"epoch": 2.877551020408163,
"eval_loss": 1.2410293817520142,
"eval_runtime": 270.6762,
"eval_samples_per_second": 6.262,
"eval_steps_per_second": 3.133,
"step": 143
},
{
"epoch": 2.8979591836734695,
"grad_norm": 0.20910531282424927,
"learning_rate": 3.615354575300166e-05,
"loss": 1.2541,
"step": 144
},
{
"epoch": 2.9183673469387754,
"grad_norm": 0.19536806643009186,
"learning_rate": 3.4862751727777797e-05,
"loss": 1.2517,
"step": 145
},
{
"epoch": 2.938775510204082,
"grad_norm": 0.18630966544151306,
"learning_rate": 3.3590539723276083e-05,
"loss": 1.2473,
"step": 146
},
{
"epoch": 2.9591836734693877,
"grad_norm": 0.1874723732471466,
"learning_rate": 3.233727266962425e-05,
"loss": 1.244,
"step": 147
}
],
"logging_steps": 1,
"max_steps": 196,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 49,
"total_flos": 3.0628052408991744e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
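
The log_history array above interleaves per-step training records (loss, grad_norm, learning_rate) with evaluation records (eval_loss and throughput) emitted every eval_steps = 13 steps. A minimal sketch, assuming the checkpoint folder has been downloaded locally (the path below is illustrative, not part of this repository), of how the file could be inspected with Python's standard library:

# Sketch only: summarizes this trainer_state.json; the local path is an assumption.
import json

with open("checkpoint-147/trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"steps logged: {len(train_logs)}, evals: {len(eval_logs)}")
print(f"final train loss: {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")
print(f"final eval loss:  {eval_logs[-1]['eval_loss']} at step {eval_logs[-1]['step']}")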