{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9795918367346939,
  "eval_steps": 13,
  "global_step": 98,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02040816326530612,
      "grad_norm": 0.7881951332092285,
      "learning_rate": 2e-05,
      "loss": 2.7509,
      "step": 1
    },
    {
      "epoch": 0.02040816326530612,
      "eval_loss": 2.6902382373809814,
      "eval_runtime": 269.5606,
      "eval_samples_per_second": 6.288,
      "eval_steps_per_second": 3.146,
      "step": 1
    },
    {
      "epoch": 0.04081632653061224,
      "grad_norm": 0.789082407951355,
      "learning_rate": 4e-05,
      "loss": 2.7449,
      "step": 2
    },
    {
      "epoch": 0.061224489795918366,
      "grad_norm": 0.7354114055633545,
      "learning_rate": 6e-05,
      "loss": 2.7164,
      "step": 3
    },
    {
      "epoch": 0.08163265306122448,
      "grad_norm": 0.7292255759239197,
      "learning_rate": 8e-05,
      "loss": 2.7174,
      "step": 4
    },
    {
      "epoch": 0.10204081632653061,
      "grad_norm": 0.6898028254508972,
      "learning_rate": 0.0001,
      "loss": 2.6891,
      "step": 5
    },
    {
      "epoch": 0.12244897959183673,
      "grad_norm": 0.6861400604248047,
      "learning_rate": 0.00012,
      "loss": 2.6545,
      "step": 6
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.7510350346565247,
      "learning_rate": 0.00014,
      "loss": 2.5656,
      "step": 7
    },
    {
      "epoch": 0.16326530612244897,
      "grad_norm": 0.8011165261268616,
      "learning_rate": 0.00016,
      "loss": 2.4519,
      "step": 8
    },
    {
      "epoch": 0.1836734693877551,
      "grad_norm": 0.8624005317687988,
      "learning_rate": 0.00018,
      "loss": 2.3178,
      "step": 9
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 0.8004987835884094,
      "learning_rate": 0.0002,
      "loss": 2.1783,
      "step": 10
    },
    {
      "epoch": 0.22448979591836735,
      "grad_norm": 0.6362400054931641,
      "learning_rate": 0.000199985736255971,
      "loss": 2.0252,
      "step": 11
    },
    {
      "epoch": 0.24489795918367346,
      "grad_norm": 0.7930936217308044,
      "learning_rate": 0.0001999429490929718,
      "loss": 1.8839,
      "step": 12
    },
    {
      "epoch": 0.2653061224489796,
      "grad_norm": 0.5149843096733093,
      "learning_rate": 0.00019987165071710527,
      "loss": 1.8064,
      "step": 13
    },
    {
      "epoch": 0.2653061224489796,
      "eval_loss": 1.6734941005706787,
      "eval_runtime": 271.2615,
      "eval_samples_per_second": 6.249,
      "eval_steps_per_second": 3.126,
      "step": 13
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.42121434211730957,
      "learning_rate": 0.00019977186146800707,
      "loss": 1.7922,
      "step": 14
    },
    {
      "epoch": 0.30612244897959184,
      "grad_norm": 0.3523242771625519,
      "learning_rate": 0.0001996436098130433,
      "loss": 1.7711,
      "step": 15
    },
    {
      "epoch": 0.32653061224489793,
      "grad_norm": 0.3384595215320587,
      "learning_rate": 0.00019948693233918952,
      "loss": 1.7152,
      "step": 16
    },
    {
      "epoch": 0.3469387755102041,
      "grad_norm": 0.34942421317100525,
      "learning_rate": 0.00019930187374259337,
      "loss": 1.7112,
      "step": 17
    },
    {
      "epoch": 0.3673469387755102,
      "grad_norm": 0.31712639331817627,
      "learning_rate": 0.00019908848681582391,
      "loss": 1.7059,
      "step": 18
    },
    {
      "epoch": 0.3877551020408163,
      "grad_norm": 0.2875436842441559,
      "learning_rate": 0.00019884683243281116,
      "loss": 1.6468,
      "step": 19
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 0.24433130025863647,
      "learning_rate": 0.00019857697953148037,
      "loss": 1.6408,
      "step": 20
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.21414674818515778,
      "learning_rate": 0.00019827900509408581,
      "loss": 1.616,
      "step": 21
    },
    {
      "epoch": 0.4489795918367347,
      "grad_norm": 0.21537622809410095,
      "learning_rate": 0.00019795299412524945,
      "loss": 1.609,
      "step": 22
    },
    {
      "epoch": 0.46938775510204084,
      "grad_norm": 0.2432074397802353,
      "learning_rate": 0.00019759903962771156,
      "loss": 1.6066,
      "step": 23
    },
    {
      "epoch": 0.4897959183673469,
      "grad_norm": 0.2359839379787445,
      "learning_rate": 0.00019721724257579907,
      "loss": 1.5851,
      "step": 24
    },
    {
      "epoch": 0.5102040816326531,
      "grad_norm": 0.22065888345241547,
      "learning_rate": 0.00019680771188662044,
      "loss": 1.5739,
      "step": 25
    },
    {
      "epoch": 0.5306122448979592,
      "grad_norm": 0.20339132845401764,
      "learning_rate": 0.0001963705643889941,
      "loss": 1.5513,
      "step": 26
    },
    {
      "epoch": 0.5306122448979592,
      "eval_loss": 1.4832030534744263,
      "eval_runtime": 271.2449,
      "eval_samples_per_second": 6.249,
      "eval_steps_per_second": 3.126,
      "step": 26
    },
    {
      "epoch": 0.5510204081632653,
      "grad_norm": 0.18875224888324738,
      "learning_rate": 0.00019590592479012023,
      "loss": 1.5378,
      "step": 27
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.18564417958259583,
      "learning_rate": 0.00019541392564000488,
      "loss": 1.5212,
      "step": 28
    },
    {
      "epoch": 0.5918367346938775,
      "grad_norm": 0.16226942837238312,
      "learning_rate": 0.00019489470729364692,
      "loss": 1.5391,
      "step": 29
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 0.15650039911270142,
      "learning_rate": 0.00019434841787099803,
      "loss": 1.511,
      "step": 30
    },
    {
      "epoch": 0.6326530612244898,
      "grad_norm": 0.15976540744304657,
      "learning_rate": 0.00019377521321470805,
      "loss": 1.5119,
      "step": 31
    },
    {
      "epoch": 0.6530612244897959,
      "grad_norm": 0.16409288346767426,
      "learning_rate": 0.00019317525684566685,
      "loss": 1.4909,
      "step": 32
    },
    {
      "epoch": 0.673469387755102,
      "grad_norm": 0.15468019247055054,
      "learning_rate": 0.00019254871991635598,
      "loss": 1.4951,
      "step": 33
    },
    {
      "epoch": 0.6938775510204082,
      "grad_norm": 0.1462036371231079,
      "learning_rate": 0.00019189578116202307,
      "loss": 1.4643,
      "step": 34
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.1541963368654251,
      "learning_rate": 0.00019121662684969335,
      "loss": 1.5159,
      "step": 35
    },
    {
      "epoch": 0.7346938775510204,
      "grad_norm": 0.14798064529895782,
      "learning_rate": 0.00019051145072503215,
      "loss": 1.4741,
      "step": 36
    },
    {
      "epoch": 0.7551020408163265,
      "grad_norm": 0.13914817571640015,
      "learning_rate": 0.00018978045395707418,
      "loss": 1.4788,
      "step": 37
    },
    {
      "epoch": 0.7755102040816326,
      "grad_norm": 0.15608824789524078,
      "learning_rate": 0.00018902384508083517,
      "loss": 1.4687,
      "step": 38
    },
    {
      "epoch": 0.7959183673469388,
      "grad_norm": 0.14460116624832153,
      "learning_rate": 0.00018824183993782192,
      "loss": 1.482,
      "step": 39
    },
    {
      "epoch": 0.7959183673469388,
      "eval_loss": 1.411073088645935,
      "eval_runtime": 271.292,
      "eval_samples_per_second": 6.248,
      "eval_steps_per_second": 3.126,
      "step": 39
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 0.15740551054477692,
      "learning_rate": 0.00018743466161445823,
      "loss": 1.4486,
      "step": 40
    },
    {
      "epoch": 0.8367346938775511,
      "grad_norm": 0.14149661362171173,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.4353,
      "step": 41
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.14034292101860046,
      "learning_rate": 0.0001857457136130651,
      "loss": 1.4523,
      "step": 42
    },
    {
      "epoch": 0.8775510204081632,
      "grad_norm": 0.1487722396850586,
      "learning_rate": 0.00018486442574947511,
      "loss": 1.4095,
      "step": 43
    },
    {
      "epoch": 0.8979591836734694,
      "grad_norm": 0.17400234937667847,
      "learning_rate": 0.00018395892819696389,
      "loss": 1.4414,
      "step": 44
    },
    {
      "epoch": 0.9183673469387755,
      "grad_norm": 0.1741325408220291,
      "learning_rate": 0.00018302947927123766,
      "loss": 1.4379,
      "step": 45
    },
    {
      "epoch": 0.9387755102040817,
      "grad_norm": 0.15319454669952393,
      "learning_rate": 0.00018207634412072764,
      "loss": 1.405,
      "step": 46
    },
    {
      "epoch": 0.9591836734693877,
      "grad_norm": 0.15876264870166779,
      "learning_rate": 0.00018109979465095013,
      "loss": 1.4122,
      "step": 47
    },
    {
      "epoch": 0.9795918367346939,
      "grad_norm": 0.17120805382728577,
      "learning_rate": 0.00018010010944693848,
      "loss": 1.4132,
      "step": 48
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.1436116099357605,
      "learning_rate": 0.00017907757369376985,
      "loss": 1.416,
      "step": 49
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 0.1707429438829422,
      "learning_rate": 0.0001780324790952092,
      "loss": 1.3913,
      "step": 50
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 0.17117524147033691,
      "learning_rate": 0.00017696512379049325,
      "loss": 1.3963,
      "step": 51
    },
    {
      "epoch": 1.0408163265306123,
      "grad_norm": 0.13410089910030365,
      "learning_rate": 0.0001758758122692791,
      "loss": 1.392,
      "step": 52
    },
    {
      "epoch": 1.0408163265306123,
      "eval_loss": 1.3676769733428955,
      "eval_runtime": 270.8566,
      "eval_samples_per_second": 6.258,
      "eval_steps_per_second": 3.131,
      "step": 52
    },
    {
      "epoch": 1.0612244897959184,
      "grad_norm": 0.18877607583999634,
      "learning_rate": 0.00017476485528478093,
      "loss": 1.3854,
      "step": 53
    },
    {
      "epoch": 1.0816326530612246,
      "grad_norm": 0.1752927452325821,
      "learning_rate": 0.00017363256976511972,
      "loss": 1.3759,
      "step": 54
    },
    {
      "epoch": 1.1020408163265305,
      "grad_norm": 0.17180170118808746,
      "learning_rate": 0.000172479278722912,
      "loss": 1.3614,
      "step": 55
    },
    {
      "epoch": 1.1224489795918366,
      "grad_norm": 0.1640290915966034,
      "learning_rate": 0.00017130531116312203,
      "loss": 1.3853,
      "step": 56
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.2047068476676941,
      "learning_rate": 0.0001701110019892053,
      "loss": 1.3699,
      "step": 57
    },
    {
      "epoch": 1.163265306122449,
      "grad_norm": 0.1835869997739792,
      "learning_rate": 0.00016889669190756868,
      "loss": 1.3403,
      "step": 58
    },
    {
      "epoch": 1.183673469387755,
      "grad_norm": 0.16733241081237793,
      "learning_rate": 0.00016766272733037576,
      "loss": 1.3609,
      "step": 59
    },
    {
      "epoch": 1.2040816326530612,
      "grad_norm": 0.178726926445961,
      "learning_rate": 0.00016640946027672392,
      "loss": 1.3651,
      "step": 60
    },
    {
      "epoch": 1.2244897959183674,
      "grad_norm": 0.16719630360603333,
      "learning_rate": 0.00016513724827222227,
      "loss": 1.3676,
      "step": 61
    },
    {
      "epoch": 1.2448979591836735,
      "grad_norm": 0.15999363362789154,
      "learning_rate": 0.00016384645424699835,
      "loss": 1.3651,
      "step": 62
    },
    {
      "epoch": 1.2653061224489797,
      "grad_norm": 0.1705988198518753,
      "learning_rate": 0.00016253744643216368,
      "loss": 1.3757,
      "step": 63
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.14996370673179626,
      "learning_rate": 0.0001612105982547663,
      "loss": 1.3474,
      "step": 64
    },
    {
      "epoch": 1.306122448979592,
      "grad_norm": 0.19127260148525238,
      "learning_rate": 0.0001598662882312615,
      "loss": 1.3414,
      "step": 65
    },
    {
      "epoch": 1.306122448979592,
      "eval_loss": 1.331880807876587,
      "eval_runtime": 270.8424,
      "eval_samples_per_second": 6.258,
      "eval_steps_per_second": 3.131,
      "step": 65
    },
    {
      "epoch": 1.3265306122448979,
      "grad_norm": 0.16125527024269104,
      "learning_rate": 0.00015850489985953076,
      "loss": 1.3509,
      "step": 66
    },
    {
      "epoch": 1.346938775510204,
      "grad_norm": 0.1979473978281021,
      "learning_rate": 0.00015712682150947923,
      "loss": 1.3579,
      "step": 67
    },
    {
      "epoch": 1.3673469387755102,
      "grad_norm": 0.18317992985248566,
      "learning_rate": 0.00015573244631224365,
      "loss": 1.3341,
      "step": 68
    },
    {
      "epoch": 1.3877551020408163,
      "grad_norm": 0.1646898239850998,
      "learning_rate": 0.0001543221720480419,
      "loss": 1.3361,
      "step": 69
    },
    {
      "epoch": 1.4081632653061225,
      "grad_norm": 0.1760271042585373,
      "learning_rate": 0.00015289640103269625,
      "loss": 1.358,
      "step": 70
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.165283203125,
      "learning_rate": 0.0001514555400028629,
      "loss": 1.3072,
      "step": 71
    },
    {
      "epoch": 1.4489795918367347,
      "grad_norm": 0.1507076472043991,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.3133,
      "step": 72
    },
    {
      "epoch": 1.469387755102041,
      "grad_norm": 0.16913647949695587,
      "learning_rate": 0.00014853019625310813,
      "loss": 1.3232,
      "step": 73
    },
    {
      "epoch": 1.489795918367347,
      "grad_norm": 0.18266479671001434,
      "learning_rate": 0.0001470465480602756,
      "loss": 1.3512,
      "step": 74
    },
    {
      "epoch": 1.510204081632653,
      "grad_norm": 0.19301828742027283,
      "learning_rate": 0.0001455494786690634,
      "loss": 1.3241,
      "step": 75
    },
    {
      "epoch": 1.5306122448979593,
      "grad_norm": 0.16109652817249298,
      "learning_rate": 0.00014403941515576344,
      "loss": 1.3256,
      "step": 76
    },
    {
      "epoch": 1.5510204081632653,
      "grad_norm": 0.17053867876529694,
      "learning_rate": 0.00014251678830356408,
      "loss": 1.3162,
      "step": 77
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.17348544299602509,
      "learning_rate": 0.00014098203247965875,
      "loss": 1.3213,
      "step": 78
    },
    {
      "epoch": 1.5714285714285714,
      "eval_loss": 1.3028697967529297,
      "eval_runtime": 270.8095,
      "eval_samples_per_second": 6.259,
      "eval_steps_per_second": 3.131,
      "step": 78
    },
    {
      "epoch": 1.5918367346938775,
      "grad_norm": 0.1703907549381256,
      "learning_rate": 0.00013943558551133186,
      "loss": 1.3073,
      "step": 79
    },
    {
      "epoch": 1.6122448979591837,
      "grad_norm": 0.17313100397586823,
      "learning_rate": 0.0001378778885610576,
      "loss": 1.3232,
      "step": 80
    },
    {
      "epoch": 1.6326530612244898,
      "grad_norm": 0.17237025499343872,
      "learning_rate": 0.00013630938600064747,
      "loss": 1.3406,
      "step": 81
    },
    {
      "epoch": 1.6530612244897958,
      "grad_norm": 0.19658459722995758,
      "learning_rate": 0.00013473052528448201,
      "loss": 1.3114,
      "step": 82
    },
    {
      "epoch": 1.6734693877551021,
      "grad_norm": 0.20599938929080963,
      "learning_rate": 0.0001331417568218636,
      "loss": 1.3288,
      "step": 83
    },
    {
      "epoch": 1.693877551020408,
      "grad_norm": 0.17759399116039276,
      "learning_rate": 0.00013154353384852558,
      "loss": 1.2995,
      "step": 84
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.18712250888347626,
      "learning_rate": 0.00012993631229733582,
      "loss": 1.2895,
      "step": 85
    },
    {
      "epoch": 1.7346938775510203,
      "grad_norm": 0.1991330236196518,
      "learning_rate": 0.00012832055066823038,
      "loss": 1.2886,
      "step": 86
    },
    {
      "epoch": 1.7551020408163265,
      "grad_norm": 0.22125203907489777,
      "learning_rate": 0.00012669670989741517,
      "loss": 1.3233,
      "step": 87
    },
    {
      "epoch": 1.7755102040816326,
      "grad_norm": 0.2052813619375229,
      "learning_rate": 0.00012506525322587207,
      "loss": 1.3079,
      "step": 88
    },
    {
      "epoch": 1.7959183673469388,
      "grad_norm": 0.19290736317634583,
      "learning_rate": 0.00012342664606720822,
      "loss": 1.3174,
      "step": 89
    },
    {
      "epoch": 1.816326530612245,
      "grad_norm": 0.20912542939186096,
      "learning_rate": 0.00012178135587488515,
      "loss": 1.2915,
      "step": 90
    },
    {
      "epoch": 1.836734693877551,
      "grad_norm": 0.20760588347911835,
      "learning_rate": 0.00012012985200886602,
      "loss": 1.3028,
      "step": 91
    },
    {
      "epoch": 1.836734693877551,
      "eval_loss": 1.2795333862304688,
      "eval_runtime": 270.6525,
      "eval_samples_per_second": 6.263,
      "eval_steps_per_second": 3.133,
      "step": 91
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.1996900886297226,
      "learning_rate": 0.00011847260560171896,
      "loss": 1.3119,
      "step": 92
    },
    {
      "epoch": 1.8775510204081631,
      "grad_norm": 0.23766876757144928,
      "learning_rate": 0.00011681008942421483,
      "loss": 1.2978,
      "step": 93
    },
    {
      "epoch": 1.8979591836734695,
      "grad_norm": 0.19782397150993347,
      "learning_rate": 0.00011514277775045768,
      "loss": 1.2955,
      "step": 94
    },
    {
      "epoch": 1.9183673469387754,
      "grad_norm": 0.22519494593143463,
      "learning_rate": 0.00011347114622258612,
      "loss": 1.2957,
      "step": 95
    },
    {
      "epoch": 1.9387755102040818,
      "grad_norm": 0.2590245306491852,
      "learning_rate": 0.00011179567171508463,
      "loss": 1.2809,
      "step": 96
    },
    {
      "epoch": 1.9591836734693877,
      "grad_norm": 0.2235420197248459,
      "learning_rate": 0.00011011683219874323,
      "loss": 1.2784,
      "step": 97
    },
    {
      "epoch": 1.9795918367346939,
      "grad_norm": 0.285740464925766,
      "learning_rate": 0.00010843510660430447,
      "loss": 1.309,
      "step": 98
    }
  ],
  "logging_steps": 1,
  "max_steps": 196,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 49,
  "total_flos": 2.0418701605994496e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}