{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 420,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.002380952380952381, "grad_norm": 0.0, "learning_rate": 0.0, "loss": 0.5013, "step": 1},
    {"epoch": 0.004761904761904762, "grad_norm": 1.2982189655303955, "learning_rate": 0.0, "loss": 0.4693, "step": 2},
    {"epoch": 0.007142857142857143, "grad_norm": 1.3160841464996338, "learning_rate": 1.2920296742201791e-05, "loss": 0.4462, "step": 3},
    {"epoch": 0.009523809523809525, "grad_norm": 1.3160841464996338, "learning_rate": 1.2920296742201791e-05, "loss": 0.8593, "step": 4},
    {"epoch": 0.011904761904761904, "grad_norm": 1.3160841464996338, "learning_rate": 1.2920296742201791e-05, "loss": 0.8663, "step": 5},
    {"epoch": 0.014285714285714285, "grad_norm": 1.0395259857177734, "learning_rate": 2.0478185834579563e-05, "loss": 0.5107, "step": 6},
    {"epoch": 0.016666666666666666, "grad_norm": 1.0972439050674438, "learning_rate": 2.5840593484403582e-05, "loss": 0.5832, "step": 7},
    {"epoch": 0.01904761904761905, "grad_norm": 1.2451457977294922, "learning_rate": 3e-05, "loss": 0.4994, "step": 8},
    {"epoch": 0.02142857142857143, "grad_norm": 1.5299328565597534, "learning_rate": 3e-05, "loss": 0.5632, "step": 9},
    {"epoch": 0.023809523809523808, "grad_norm": 1.456752061843872, "learning_rate": 3e-05, "loss": 0.5448, "step": 10},
    {"epoch": 0.02619047619047619, "grad_norm": 0.7797374725341797, "learning_rate": 3e-05, "loss": 0.4222, "step": 11},
    {"epoch": 0.02857142857142857, "grad_norm": 0.992181658744812, "learning_rate": 3e-05, "loss": 0.5173, "step": 12},
    {"epoch": 0.030952380952380953, "grad_norm": 1.0963839292526245, "learning_rate": 3e-05, "loss": 0.4048, "step": 13},
    {"epoch": 0.03333333333333333, "grad_norm": 1.3775476217269897, "learning_rate": 3e-05, "loss": 0.4774, "step": 14},
    {"epoch": 0.03571428571428571, "grad_norm": 1.551168441772461, "learning_rate": 3e-05, "loss": 0.5603, "step": 15},
    {"epoch": 0.0380952380952381, "grad_norm": 1.189092993736267, "learning_rate": 3e-05, "loss": 0.4756, "step": 16},
    {"epoch": 0.04047619047619048, "grad_norm": 1.0813432931900024, "learning_rate": 3e-05, "loss": 0.4955, "step": 17},
    {"epoch": 0.04285714285714286, "grad_norm": 1.224169373512268, "learning_rate": 3e-05, "loss": 0.4906, "step": 18},
    {"epoch": 0.04523809523809524, "grad_norm": 0.7351661324501038, "learning_rate": 3e-05, "loss": 0.4717, "step": 19},
    {"epoch": 0.047619047619047616, "grad_norm": 0.8378927707672119, "learning_rate": 3e-05, "loss": 0.4436, "step": 20},
    {"epoch": 0.05, "grad_norm": 1.2906426191329956, "learning_rate": 3e-05, "loss": 0.4294, "step": 21},
    {"epoch": 0.05238095238095238, "grad_norm": 0.9292688369750977, "learning_rate": 3e-05, "loss": 0.4252, "step": 22},
    {"epoch": 0.05476190476190476, "grad_norm": 0.757605254650116, "learning_rate": 3e-05, "loss": 0.4078, "step": 23},
    {"epoch": 0.05714285714285714, "grad_norm": 0.8978239893913269, "learning_rate": 3e-05, "loss": 0.4561, "step": 24},
    {"epoch": 0.05952380952380952, "grad_norm": 0.7445272207260132, "learning_rate": 3e-05, "loss": 0.4007, "step": 25},
    {"epoch": 0.06190476190476191, "grad_norm": 0.755946934223175, "learning_rate": 3e-05, "loss": 0.3974, "step": 26},
    {"epoch": 0.06428571428571428, "grad_norm": 1.2127957344055176, "learning_rate": 3e-05, "loss": 0.5496, "step": 27},
    {"epoch": 0.06666666666666667, "grad_norm": 1.2571460008621216, "learning_rate": 3e-05, "loss": 0.4526, "step": 28},
    {"epoch": 0.06904761904761905, "grad_norm": 0.9187604784965515, "learning_rate": 3e-05, "loss": 0.4042, "step": 29},
    {"epoch": 0.07142857142857142, "grad_norm": 0.990939736366272, "learning_rate": 3e-05, "loss": 0.4855, "step": 30},
    {"epoch": 0.07380952380952381, "grad_norm": 1.0081547498703003, "learning_rate": 3e-05, "loss": 0.5543, "step": 31},
    {"epoch": 0.0761904761904762, "grad_norm": 0.696738600730896, "learning_rate": 3e-05, "loss": 0.4036, "step": 32},
    {"epoch": 0.07857142857142857, "grad_norm": 1.7002297639846802, "learning_rate": 3e-05, "loss": 0.4686, "step": 33},
    {"epoch": 0.08095238095238096, "grad_norm": 0.8276028633117676, "learning_rate": 3e-05, "loss": 0.4009, "step": 34},
    {"epoch": 0.08333333333333333, "grad_norm": 1.270995020866394, "learning_rate": 3e-05, "loss": 0.4926, "step": 35},
    {"epoch": 0.08571428571428572, "grad_norm": 1.0746862888336182, "learning_rate": 3e-05, "loss": 0.4982, "step": 36},
    {"epoch": 0.0880952380952381, "grad_norm": 1.0116355419158936, "learning_rate": 3e-05, "loss": 0.3548, "step": 37},
    {"epoch": 0.09047619047619047, "grad_norm": 0.9556488394737244, "learning_rate": 3e-05, "loss": 0.4268, "step": 38},
    {"epoch": 0.09285714285714286, "grad_norm": 0.752404510974884, "learning_rate": 3e-05, "loss": 0.4496, "step": 39},
    {"epoch": 0.09523809523809523, "grad_norm": 0.7381091713905334, "learning_rate": 3e-05, "loss": 0.4412, "step": 40},
    {"epoch": 0.09761904761904762, "grad_norm": 1.2671267986297607, "learning_rate": 3e-05, "loss": 0.4382, "step": 41},
    {"epoch": 0.1, "grad_norm": 0.7863288521766663, "learning_rate": 3e-05, "loss": 0.4186, "step": 42},
    {"epoch": 0.10238095238095238, "grad_norm": 0.9142912030220032, "learning_rate": 3e-05, "loss": 0.442, "step": 43},
    {"epoch": 0.10476190476190476, "grad_norm": 0.9501491189002991, "learning_rate": 3e-05, "loss": 0.5339, "step": 44},
    {"epoch": 0.10714285714285714, "grad_norm": 0.888054609298706, "learning_rate": 3e-05, "loss": 0.4928, "step": 45},
    {"epoch": 0.10952380952380952, "grad_norm": 0.8860450983047485, "learning_rate": 3e-05, "loss": 0.4081, "step": 46},
    {"epoch": 0.11190476190476191, "grad_norm": 0.894538164138794, "learning_rate": 3e-05, "loss": 0.4213, "step": 47},
    {"epoch": 0.11428571428571428, "grad_norm": 1.657795786857605, "learning_rate": 3e-05, "loss": 0.4112, "step": 48},
    {"epoch": 0.11666666666666667, "grad_norm": 1.4137582778930664, "learning_rate": 3e-05, "loss": 0.5444, "step": 49},
    {"epoch": 0.11904761904761904, "grad_norm": 1.1915364265441895, "learning_rate": 3e-05, "loss": 0.4655, "step": 50},
    {"epoch": 0.12142857142857143, "grad_norm": 0.8001396059989929, "learning_rate": 3e-05, "loss": 0.4571, "step": 51},
    {"epoch": 0.12380952380952381, "grad_norm": 0.9784255623817444, "learning_rate": 3e-05, "loss": 0.4589, "step": 52},
    {"epoch": 0.1261904761904762, "grad_norm": 1.1923211812973022, "learning_rate": 3e-05, "loss": 0.5349, "step": 53},
    {"epoch": 0.12857142857142856, "grad_norm": 1.6921881437301636, "learning_rate": 3e-05, "loss": 0.5921, "step": 54},
    {"epoch": 0.13095238095238096, "grad_norm": 1.4049354791641235, "learning_rate": 3e-05, "loss": 0.3999, "step": 55},
    {"epoch": 0.13333333333333333, "grad_norm": 0.6025511026382446, "learning_rate": 3e-05, "loss": 0.3708, "step": 56},
    {"epoch": 0.1357142857142857, "grad_norm": 1.200055480003357, "learning_rate": 3e-05, "loss": 0.6888, "step": 57},
    {"epoch": 0.1380952380952381, "grad_norm": 0.9409858584403992, "learning_rate": 3e-05, "loss": 0.4722, "step": 58},
    {"epoch": 0.14047619047619048, "grad_norm": 1.47160804271698, "learning_rate": 3e-05, "loss": 1.0772, "step": 59},
    {"epoch": 0.14285714285714285, "grad_norm": 0.935752809047699, "learning_rate": 3e-05, "loss": 0.3725, "step": 60},
    {"epoch": 0.14523809523809525, "grad_norm": 0.8135111927986145, "learning_rate": 3e-05, "loss": 0.4312, "step": 61},
    {"epoch": 0.14761904761904762, "grad_norm": 1.1500678062438965, "learning_rate": 3e-05, "loss": 0.4317, "step": 62},
    {"epoch": 0.15, "grad_norm": 1.0388532876968384, "learning_rate": 3e-05, "loss": 0.5262, "step": 63},
    {"epoch": 0.1523809523809524, "grad_norm": 1.0478947162628174, "learning_rate": 3e-05, "loss": 0.5224, "step": 64},
    {"epoch": 0.15476190476190477, "grad_norm": 0.6370094418525696, "learning_rate": 3e-05, "loss": 0.4117, "step": 65},
    {"epoch": 0.15714285714285714, "grad_norm": 0.8117276430130005, "learning_rate": 3e-05, "loss": 0.4513, "step": 66},
    {"epoch": 0.1595238095238095, "grad_norm": 1.4209480285644531, "learning_rate": 3e-05, "loss": 0.419, "step": 67},
    {"epoch": 0.1619047619047619, "grad_norm": 1.3270151615142822, "learning_rate": 3e-05, "loss": 0.5064, "step": 68},
    {"epoch": 0.16428571428571428, "grad_norm": 1.4599213600158691, "learning_rate": 3e-05, "loss": 0.5342, "step": 69},
    {"epoch": 0.16666666666666666, "grad_norm": 0.7401596307754517, "learning_rate": 3e-05, "loss": 0.4609, "step": 70},
    {"epoch": 0.16904761904761906, "grad_norm": 0.6574556827545166, "learning_rate": 3e-05, "loss": 0.3689, "step": 71},
    {"epoch": 0.17142857142857143, "grad_norm": 1.6753238439559937, "learning_rate": 3e-05, "loss": 0.5226, "step": 72},
    {"epoch": 0.1738095238095238, "grad_norm": 0.8294124603271484, "learning_rate": 3e-05, "loss": 0.4176, "step": 73},
    {"epoch": 0.1761904761904762, "grad_norm": 0.9138219356536865, "learning_rate": 3e-05, "loss": 0.4123, "step": 74},
    {"epoch": 0.17857142857142858, "grad_norm": 0.8199627995491028, "learning_rate": 3e-05, "loss": 0.4374, "step": 75},
    {"epoch": 0.18095238095238095, "grad_norm": 0.7391550540924072, "learning_rate": 3e-05, "loss": 0.3825, "step": 76},
    {"epoch": 0.18333333333333332, "grad_norm": 0.729434609413147, "learning_rate": 3e-05, "loss": 0.4824, "step": 77},
    {"epoch": 0.18571428571428572, "grad_norm": 0.7266400456428528, "learning_rate": 3e-05, "loss": 0.3481, "step": 78},
    {"epoch": 0.1880952380952381, "grad_norm": 0.8050060272216797, "learning_rate": 3e-05, "loss": 0.4694, "step": 79},
    {"epoch": 0.19047619047619047, "grad_norm": 1.172167181968689, "learning_rate": 3e-05, "loss": 0.4291, "step": 80},
    {"epoch": 0.19285714285714287, "grad_norm": 1.1613800525665283, "learning_rate": 3e-05, "loss": 0.483, "step": 81},
    {"epoch": 0.19523809523809524, "grad_norm": 0.8675224184989929, "learning_rate": 3e-05, "loss": 0.4436, "step": 82},
    {"epoch": 0.1976190476190476, "grad_norm": 0.899101734161377, "learning_rate": 3e-05, "loss": 0.4858, "step": 83},
    {"epoch": 0.2, "grad_norm": 1.030885934829712, "learning_rate": 3e-05, "loss": 0.5346, "step": 84},
    {"epoch": 0.20238095238095238, "grad_norm": 0.9242229461669922, "learning_rate": 3e-05, "loss": 0.4349, "step": 85},
    {"epoch": 0.20476190476190476, "grad_norm": 0.7717217803001404, "learning_rate": 3e-05, "loss": 0.3759, "step": 86},
    {"epoch": 0.20714285714285716, "grad_norm": 1.2170741558074951, "learning_rate": 3e-05, "loss": 0.4937, "step": 87},
    {"epoch": 0.20952380952380953, "grad_norm": 1.2934021949768066, "learning_rate": 3e-05, "loss": 0.4993, "step": 88},
    {"epoch": 0.2119047619047619, "grad_norm": 1.3435097932815552, "learning_rate": 3e-05, "loss": 0.4603, "step": 89},
    {"epoch": 0.21428571428571427, "grad_norm": 0.8454081416130066, "learning_rate": 3e-05, "loss": 0.5203, "step": 90},
    {"epoch": 0.21666666666666667, "grad_norm": 0.8319071531295776, "learning_rate": 3e-05, "loss": 0.4997, "step": 91},
    {"epoch": 0.21904761904761905, "grad_norm": 0.6560430526733398, "learning_rate": 3e-05, "loss": 0.4763, "step": 92},
    {"epoch": 0.22142857142857142, "grad_norm": 2.126009941101074, "learning_rate": 3e-05, "loss": 0.7399, "step": 93},
    {"epoch": 0.22380952380952382, "grad_norm": 1.1273273229599, "learning_rate": 3e-05, "loss": 0.5303, "step": 94},
    {"epoch": 0.2261904761904762, "grad_norm": 0.9536347985267639, "learning_rate": 3e-05, "loss": 0.5147, "step": 95},
    {"epoch": 0.22857142857142856, "grad_norm": 0.6337579488754272, "learning_rate": 3e-05, "loss": 0.4474, "step": 96},
    {"epoch": 0.23095238095238096, "grad_norm": 0.6355674862861633, "learning_rate": 3e-05, "loss": 0.3958, "step": 97},
    {"epoch": 0.23333333333333334, "grad_norm": 0.9631844758987427, "learning_rate": 3e-05, "loss": 0.4463, "step": 98},
    {"epoch": 0.2357142857142857, "grad_norm": 0.7559183239936829, "learning_rate": 3e-05, "loss": 0.4952, "step": 99},
    {"epoch": 0.23809523809523808, "grad_norm": 0.926929771900177, "learning_rate": 3e-05, "loss": 0.5318, "step": 100},
    {"epoch": 0.24047619047619048, "grad_norm": 0.8441896438598633, "learning_rate": 3e-05, "loss": 0.4411, "step": 101},
    {"epoch": 0.24285714285714285, "grad_norm": 1.1027876138687134, "learning_rate": 3e-05, "loss": 0.5618, "step": 102},
    {"epoch": 0.24523809523809523, "grad_norm": 0.6018999814987183, "learning_rate": 3e-05, "loss": 0.4046, "step": 103},
    {"epoch": 0.24761904761904763, "grad_norm": 0.8065565228462219, "learning_rate": 3e-05, "loss": 0.4195, "step": 104},
    {"epoch": 0.25, "grad_norm": 0.6117183566093445, "learning_rate": 3e-05, "loss": 0.3556, "step": 105},
    {"epoch": 0.2523809523809524, "grad_norm": 1.0436320304870605, "learning_rate": 3e-05, "loss": 0.4699, "step": 106},
    {"epoch": 0.25476190476190474, "grad_norm": 1.1943013668060303, "learning_rate": 3e-05, "loss": 0.5636, "step": 107},
    {"epoch": 0.2571428571428571, "grad_norm": 0.8576936721801758, "learning_rate": 3e-05, "loss": 0.4178, "step": 108},
    {"epoch": 0.25952380952380955, "grad_norm": 0.7651050686836243, "learning_rate": 3e-05, "loss": 0.436, "step": 109},
    {"epoch": 0.2619047619047619, "grad_norm": 0.787661612033844, "learning_rate": 3e-05, "loss": 0.4378, "step": 110},
    {"epoch": 0.2642857142857143, "grad_norm": 0.961089551448822, "learning_rate": 3e-05, "loss": 0.4979, "step": 111},
    {"epoch": 0.26666666666666666, "grad_norm": 0.7426000833511353, "learning_rate": 3e-05, "loss": 0.4908, "step": 112},
    {"epoch": 0.26904761904761904, "grad_norm": 1.0738164186477661, "learning_rate": 3e-05, "loss": 0.5052, "step": 113},
    {"epoch": 0.2714285714285714, "grad_norm": 1.3592005968093872, "learning_rate": 3e-05, "loss": 0.5155, "step": 114},
    {"epoch": 0.27380952380952384, "grad_norm": 0.776147723197937, "learning_rate": 3e-05, "loss": 0.4199, "step": 115},
    {"epoch": 0.2761904761904762, "grad_norm": 1.184778094291687, "learning_rate": 3e-05, "loss": 0.4471, "step": 116},
    {"epoch": 0.2785714285714286, "grad_norm": 0.9415385127067566, "learning_rate": 3e-05, "loss": 0.4518, "step": 117},
    {"epoch": 0.28095238095238095, "grad_norm": 0.9082314372062683, "learning_rate": 3e-05, "loss": 0.4986, "step": 118},
    {"epoch": 0.2833333333333333, "grad_norm": 1.6528809070587158, "learning_rate": 3e-05, "loss": 0.6329, "step": 119},
    {"epoch": 0.2857142857142857, "grad_norm": 0.6157988905906677, "learning_rate": 3e-05, "loss": 0.346, "step": 120},
    {"epoch": 0.28809523809523807, "grad_norm": 1.335919737815857, "learning_rate": 3e-05, "loss": 0.5417, "step": 121},
    {"epoch": 0.2904761904761905, "grad_norm": 1.7414772510528564, "learning_rate": 3e-05, "loss": 0.513, "step": 122},
    {"epoch": 0.29285714285714287, "grad_norm": 1.0106985569000244, "learning_rate": 3e-05, "loss": 0.5358, "step": 123},
    {"epoch": 0.29523809523809524, "grad_norm": 0.8379469513893127, "learning_rate": 3e-05, "loss": 0.4548, "step": 124},
    {"epoch": 0.2976190476190476, "grad_norm": 0.8793790340423584, "learning_rate": 3e-05, "loss": 0.5191, "step": 125},
    {"epoch": 0.3, "grad_norm": 0.7612943053245544, "learning_rate": 3e-05, "loss": 0.4913, "step": 126},
    {"epoch": 0.30238095238095236, "grad_norm": 1.0233980417251587, "learning_rate": 3e-05, "loss": 0.5259, "step": 127},
    {"epoch": 0.3047619047619048, "grad_norm": 0.9049614071846008, "learning_rate": 3e-05, "loss": 0.4164, "step": 128},
    {"epoch": 0.30714285714285716, "grad_norm": 1.390436053276062, "learning_rate": 3e-05, "loss": 0.4505, "step": 129},
    {"epoch": 0.30952380952380953, "grad_norm": 0.705032229423523, "learning_rate": 3e-05, "loss": 0.4763, "step": 130},
    {"epoch": 0.3119047619047619, "grad_norm": 1.1658718585968018, "learning_rate": 3e-05, "loss": 0.4132, "step": 131},
    {"epoch": 0.3142857142857143, "grad_norm": 1.0997470617294312, "learning_rate": 3e-05, "loss": 0.4877, "step": 132},
    {"epoch": 0.31666666666666665, "grad_norm": 0.7365838885307312, "learning_rate": 3e-05, "loss": 0.3902, "step": 133},
    {"epoch": 0.319047619047619, "grad_norm": 1.0315278768539429, "learning_rate": 3e-05, "loss": 0.5093, "step": 134},
    {"epoch": 0.32142857142857145, "grad_norm": 0.8308622241020203, "learning_rate": 3e-05, "loss": 0.4309, "step": 135},
    {"epoch": 0.3238095238095238, "grad_norm": 0.8301631808280945, "learning_rate": 3e-05, "loss": 0.5057, "step": 136},
    {"epoch": 0.3261904761904762, "grad_norm": 0.6643893122673035, "learning_rate": 3e-05, "loss": 0.44, "step": 137},
    {"epoch": 0.32857142857142857, "grad_norm": 0.9323320984840393, "learning_rate": 3e-05, "loss": 0.4377, "step": 138},
    {"epoch": 0.33095238095238094, "grad_norm": 0.8339701294898987, "learning_rate": 3e-05, "loss": 0.4982, "step": 139},
    {"epoch": 0.3333333333333333, "grad_norm": 0.8500747680664062, "learning_rate": 3e-05, "loss": 0.4303, "step": 140},
    {"epoch": 0.3357142857142857, "grad_norm": 0.697967529296875, "learning_rate": 3e-05, "loss": 0.447, "step": 141},
    {"epoch": 0.3380952380952381, "grad_norm": 1.1016360521316528, "learning_rate": 3e-05, "loss": 0.4827, "step": 142},
    {"epoch": 0.3404761904761905, "grad_norm": 0.9719865918159485, "learning_rate": 3e-05, "loss": 0.4719, "step": 143},
    {"epoch": 0.34285714285714286, "grad_norm": 0.8482648134231567, "learning_rate": 3e-05, "loss": 0.4171, "step": 144},
    {"epoch": 0.34523809523809523, "grad_norm": 1.2719123363494873, "learning_rate": 3e-05, "loss": 0.4444, "step": 145},
    {"epoch": 0.3476190476190476, "grad_norm": 1.1623255014419556, "learning_rate": 3e-05, "loss": 0.4033, "step": 146},
    {"epoch": 0.35, "grad_norm": 0.7803097367286682, "learning_rate": 3e-05, "loss": 0.3299, "step": 147},
    {"epoch": 0.3523809523809524, "grad_norm": 0.6812254786491394, "learning_rate": 3e-05, "loss": 0.3652, "step": 148},
    {"epoch": 0.3547619047619048, "grad_norm": 1.0708630084991455, "learning_rate": 3e-05, "loss": 0.4121, "step": 149},
    {"epoch": 0.35714285714285715, "grad_norm": 1.3431651592254639, "learning_rate": 3e-05, "loss": 0.4235, "step": 150},
    {"epoch": 0.3595238095238095, "grad_norm": 0.779104471206665, "learning_rate": 3e-05, "loss": 0.3404, "step": 151},
    {"epoch": 0.3619047619047619, "grad_norm": 0.6354933381080627, "learning_rate": 3e-05, "loss": 0.4572, "step": 152},
    {"epoch": 0.36428571428571427, "grad_norm": 0.6587569117546082, "learning_rate": 3e-05, "loss": 0.3692, "step": 153},
    {"epoch": 0.36666666666666664, "grad_norm": 1.0237056016921997, "learning_rate": 3e-05, "loss": 0.4356, "step": 154},
    {"epoch": 0.36904761904761907, "grad_norm": 0.9965682625770569, "learning_rate": 3e-05, "loss": 0.4609, "step": 155},
    {"epoch": 0.37142857142857144, "grad_norm": 0.9826563000679016, "learning_rate": 3e-05, "loss": 0.4502, "step": 156},
    {"epoch": 0.3738095238095238, "grad_norm": 1.2390687465667725, "learning_rate": 3e-05, "loss": 0.4734, "step": 157},
    {"epoch": 0.3761904761904762, "grad_norm": 0.7621622681617737, "learning_rate": 3e-05, "loss": 0.4267, "step": 158},
    {"epoch": 0.37857142857142856, "grad_norm": 1.300564169883728, "learning_rate": 3e-05, "loss": 0.4018, "step": 159},
    {"epoch": 0.38095238095238093, "grad_norm": 1.2761445045471191, "learning_rate": 3e-05, "loss": 0.5127, "step": 160},
    {"epoch": 0.38333333333333336, "grad_norm": 0.933185875415802, "learning_rate": 3e-05, "loss": 0.5515, "step": 161},
    {"epoch": 0.38571428571428573, "grad_norm": 1.3930639028549194, "learning_rate": 3e-05, "loss": 0.5837, "step": 162},
    {"epoch": 0.3880952380952381, "grad_norm": 1.7598650455474854, "learning_rate": 3e-05, "loss": 0.6716, "step": 163},
    {"epoch": 0.3904761904761905, "grad_norm": 0.9068734645843506, "learning_rate": 3e-05, "loss": 0.4984, "step": 164},
    {"epoch": 0.39285714285714285, "grad_norm": 1.5030951499938965, "learning_rate": 3e-05, "loss": 0.4181, "step": 165},
    {"epoch": 0.3952380952380952, "grad_norm": 0.6494777798652649, "learning_rate": 3e-05, "loss": 0.4, "step": 166},
    {"epoch": 0.3976190476190476, "grad_norm": 0.7952489256858826, "learning_rate": 3e-05, "loss": 0.4569, "step": 167},
    {"epoch": 0.4, "grad_norm": 1.175072193145752, "learning_rate": 3e-05, "loss": 0.4385, "step": 168},
    {"epoch": 0.4023809523809524, "grad_norm": 0.8904735445976257, "learning_rate": 3e-05, "loss": 0.4107, "step": 169},
    {"epoch": 0.40476190476190477, "grad_norm": 1.0196988582611084, "learning_rate": 3e-05, "loss": 0.3662, "step": 170},
    {"epoch": 0.40714285714285714, "grad_norm": 1.025765299797058, "learning_rate": 3e-05, "loss": 0.4981, "step": 171},
    {"epoch": 0.4095238095238095, "grad_norm": 0.8151318430900574, "learning_rate": 3e-05, "loss": 0.5131, "step": 172},
    {"epoch": 0.4119047619047619, "grad_norm": 0.8304903507232666, "learning_rate": 3e-05, "loss": 0.4652, "step": 173},
    {"epoch": 0.4142857142857143, "grad_norm": 0.8581998944282532, "learning_rate": 3e-05, "loss": 0.492, "step": 174},
    {"epoch": 0.4166666666666667, "grad_norm": 0.7963671088218689, "learning_rate": 3e-05, "loss": 0.4149, "step": 175},
    {"epoch": 0.41904761904761906, "grad_norm": 0.814155638217926, "learning_rate": 3e-05, "loss": 0.5259, "step": 176},
    {"epoch": 0.42142857142857143, "grad_norm": 0.819995105266571, "learning_rate": 3e-05, "loss": 0.4502, "step": 177},
    {"epoch": 0.4238095238095238, "grad_norm": 0.6711002588272095, "learning_rate": 3e-05, "loss": 0.4163, "step": 178},
    {"epoch": 0.4261904761904762, "grad_norm": 0.8345437049865723, "learning_rate": 3e-05, "loss": 0.4161, "step": 179},
    {"epoch": 0.42857142857142855, "grad_norm": 0.5476064682006836, "learning_rate": 3e-05, "loss": 0.38, "step": 180},
    {"epoch": 0.430952380952381, "grad_norm": 0.836280882358551, "learning_rate": 3e-05, "loss": 0.4163, "step": 181},
    {"epoch": 0.43333333333333335, "grad_norm": 0.8886452317237854, "learning_rate": 3e-05, "loss": 0.4447, "step": 182},
    {"epoch": 0.4357142857142857, "grad_norm": 0.89081209897995, "learning_rate": 3e-05, "loss": 0.3914, "step": 183},
    {"epoch": 0.4380952380952381, "grad_norm": 2.051706075668335, "learning_rate": 3e-05, "loss": 0.5365, "step": 184},
    {"epoch": 0.44047619047619047, "grad_norm": 0.8237314224243164, "learning_rate": 3e-05, "loss": 0.4411, "step": 185},
    {"epoch": 0.44285714285714284, "grad_norm": 0.8707730770111084, "learning_rate": 3e-05, "loss": 0.505, "step": 186},
    {"epoch": 0.4452380952380952, "grad_norm": 0.922207772731781, "learning_rate": 3e-05, "loss": 0.4698, "step": 187},
    {"epoch": 0.44761904761904764, "grad_norm": 1.2687690258026123, "learning_rate": 3e-05, "loss": 0.5653, "step": 188},
    {"epoch": 0.45, "grad_norm": 0.8072938919067383, "learning_rate": 3e-05, "loss": 0.3777, "step": 189},
    {"epoch": 0.4523809523809524, "grad_norm": 0.8983867764472961, "learning_rate": 3e-05, "loss": 0.4334, "step": 190},
    {"epoch": 0.45476190476190476, "grad_norm": 0.9799190163612366, "learning_rate": 3e-05, "loss": 0.567, "step": 191},
    {"epoch": 0.45714285714285713, "grad_norm": 0.8757649660110474, "learning_rate": 3e-05, "loss": 0.4425, "step": 192},
    {"epoch": 0.4595238095238095, "grad_norm": 1.6467727422714233, "learning_rate": 3e-05, "loss": 0.484, "step": 193},
    {"epoch": 0.46190476190476193, "grad_norm": 0.6561694741249084, "learning_rate": 3e-05, "loss": 0.4947, "step": 194},
    {"epoch": 0.4642857142857143, "grad_norm": 0.8178306818008423, "learning_rate": 3e-05, "loss": 0.4398, "step": 195},
    {"epoch": 0.4666666666666667, "grad_norm": 0.9850760102272034, "learning_rate": 3e-05, "loss": 0.4436, "step": 196},
    {"epoch": 0.46904761904761905, "grad_norm": 0.8389570713043213, "learning_rate": 3e-05, "loss": 0.4303, "step": 197},
    {"epoch": 0.4714285714285714, "grad_norm": 1.0063328742980957, "learning_rate": 3e-05, "loss": 0.4574, "step": 198},
    {"epoch": 0.4738095238095238, "grad_norm": 0.7931296229362488, "learning_rate": 3e-05, "loss": 0.4096, "step": 199},
    {"epoch": 0.47619047619047616, "grad_norm": 1.4383584260940552, "learning_rate": 3e-05, "loss": 0.5578, "step": 200},
    {"epoch": 0.4785714285714286, "grad_norm": 1.2136746644973755, "learning_rate": 3e-05, "loss": 0.505, "step": 201},
    {"epoch": 0.48095238095238096, "grad_norm": 0.8902712464332581, "learning_rate": 3e-05, "loss": 0.385, "step": 202},
    {"epoch": 0.48333333333333334, "grad_norm": 0.6584445238113403, "learning_rate": 3e-05, "loss": 0.4603, "step": 203},
    {"epoch": 0.4857142857142857, "grad_norm": 1.4439114332199097, "learning_rate": 3e-05, "loss": 0.4835, "step": 204},
    {"epoch": 0.4880952380952381, "grad_norm": 1.160464882850647, "learning_rate": 3e-05, "loss": 0.4695, "step": 205},
    {"epoch": 0.49047619047619045, "grad_norm": 1.3329150676727295, "learning_rate": 3e-05, "loss": 0.493, "step": 206},
    {"epoch": 0.4928571428571429, "grad_norm": 0.7865476012229919, "learning_rate": 3e-05, "loss": 0.3839, "step": 207},
    {"epoch": 0.49523809523809526, "grad_norm": 0.6394692063331604, "learning_rate": 3e-05, "loss": 0.4198, "step": 208},
    {"epoch": 0.4976190476190476, "grad_norm": 0.7849683165550232, "learning_rate": 3e-05, "loss": 0.4416, "step": 209},
    {"epoch": 0.5, "grad_norm": 0.8394088745117188, "learning_rate": 3e-05, "loss": 0.4106, "step": 210},
    {"epoch": 0.5023809523809524, "grad_norm": 1.2182062864303589, "learning_rate": 3e-05, "loss": 0.4312, "step": 211},
    {"epoch": 0.5047619047619047, "grad_norm": 1.8101513385772705, "learning_rate": 3e-05, "loss": 0.4463, "step": 212},
    {"epoch": 0.5071428571428571, "grad_norm": 1.2146952152252197, "learning_rate": 3e-05, "loss": 0.4194, "step": 213},
    {"epoch": 0.5095238095238095, "grad_norm": 1.2878483533859253, "learning_rate": 3e-05, "loss": 0.4494, "step": 214},
    {"epoch": 0.5119047619047619, "grad_norm": 0.6163401007652283, "learning_rate": 3e-05, "loss": 0.3637, "step": 215},
    {"epoch": 0.5142857142857142, "grad_norm": 0.8795377016067505, "learning_rate": 3e-05, "loss": 0.4442, "step": 216},
    {"epoch": 0.5166666666666667, "grad_norm": 1.1143704652786255, "learning_rate": 3e-05, "loss": 0.5383, "step": 217},
    {"epoch": 0.5190476190476191, "grad_norm": 0.9760447144508362, "learning_rate": 3e-05, "loss": 0.5384, "step": 218},
    {"epoch": 0.5214285714285715, "grad_norm": 0.9762544631958008, "learning_rate": 3e-05, "loss": 0.4292, "step": 219},
    {"epoch": 0.5238095238095238, "grad_norm": 1.2184864282608032, "learning_rate": 3e-05, "loss": 0.6392, "step": 220},
    {"epoch": 0.5261904761904762, "grad_norm": 0.9517297148704529, "learning_rate": 3e-05, "loss": 0.4863, "step": 221},
    {"epoch": 0.5285714285714286, "grad_norm": 0.7961365580558777, "learning_rate": 3e-05, "loss": 0.3822, "step": 222},
    {"epoch": 0.530952380952381, "grad_norm": 0.8735774755477905, "learning_rate": 3e-05, "loss": 0.5283, "step": 223},
    {"epoch": 0.5333333333333333, "grad_norm": 0.6506310105323792, "learning_rate": 3e-05, "loss": 0.3831, "step": 224},
    {"epoch": 0.5357142857142857, "grad_norm": 1.1028398275375366, "learning_rate": 3e-05, "loss": 0.5849, "step": 225},
    {"epoch": 0.5380952380952381, "grad_norm": 0.646365225315094, "learning_rate": 3e-05, "loss": 0.4134, "step": 226},
    {"epoch": 0.5404761904761904, "grad_norm": 0.7208945751190186, "learning_rate": 3e-05, "loss": 0.4387, "step": 227},
    {"epoch": 0.5428571428571428, "grad_norm": 1.1932159662246704, "learning_rate": 3e-05, "loss": 0.4882, "step": 228},
    {"epoch": 0.5452380952380952, "grad_norm": 0.5648031234741211, "learning_rate": 3e-05, "loss": 0.3583, "step": 229},
    {"epoch": 0.5476190476190477, "grad_norm": 0.8216423988342285, "learning_rate": 3e-05, "loss": 0.4276, "step": 230},
    {"epoch": 0.55, "grad_norm": 0.6750469207763672, "learning_rate": 3e-05, "loss": 0.429, "step": 231},
    {"epoch": 0.5523809523809524, "grad_norm": 0.6576554179191589, "learning_rate": 3e-05, "loss": 0.4449, "step": 232},
    {"epoch": 0.5547619047619048, "grad_norm": 1.514758586883545, "learning_rate": 3e-05, "loss": 0.4153, "step": 233},
    {"epoch": 0.5571428571428572, "grad_norm": 1.1545631885528564, "learning_rate": 3e-05, "loss": 0.4554, "step": 234},
    {"epoch": 0.5595238095238095, "grad_norm": 0.7327958941459656, "learning_rate": 3e-05, "loss": 0.3641, "step": 235},
    {"epoch": 0.5619047619047619, "grad_norm": 1.4862022399902344, "learning_rate": 3e-05, "loss": 0.5283, "step": 236},
    {"epoch": 0.5642857142857143, "grad_norm": 1.2565538883209229, "learning_rate": 3e-05, "loss": 0.4605, "step": 237},
    {"epoch": 0.5666666666666667, "grad_norm": 0.5355277061462402, "learning_rate": 3e-05, "loss": 0.3679, "step": 238},
    {"epoch": 0.569047619047619, "grad_norm": 0.8581258058547974, "learning_rate": 3e-05, "loss": 0.462, "step": 239},
    {"epoch": 0.5714285714285714, "grad_norm": 0.9366555213928223, "learning_rate": 3e-05, "loss": 0.4788, "step": 240},
    {"epoch": 0.5738095238095238, "grad_norm": 0.927740216255188, "learning_rate": 3e-05, "loss": 0.4406, "step": 241},
    {"epoch": 0.5761904761904761, "grad_norm": 0.8326394557952881, "learning_rate": 3e-05, "loss": 0.3595, "step": 242},
    {"epoch": 0.5785714285714286, "grad_norm": 1.214613676071167, "learning_rate": 3e-05, "loss": 0.5004, "step": 243},
    {"epoch": 0.580952380952381, "grad_norm": 0.9543561935424805, "learning_rate": 3e-05, "loss": 0.4941, "step": 244},
    {"epoch": 0.5833333333333334, "grad_norm": 1.1488438844680786, "learning_rate": 3e-05, "loss": 0.5066, "step": 245},
    {"epoch": 0.5857142857142857, "grad_norm": 0.9091154932975769, "learning_rate": 3e-05, "loss": 0.4834, "step": 246},
    {"epoch": 0.5880952380952381, "grad_norm": 0.7307273149490356, "learning_rate": 3e-05, "loss": 0.5543, "step": 247},
    {"epoch": 0.5904761904761905, "grad_norm": 1.0417815446853638, "learning_rate": 3e-05, "loss": 0.4615, "step": 248},
    {"epoch": 0.5928571428571429, "grad_norm": 0.7121735215187073, "learning_rate": 3e-05, "loss": 0.374, "step": 249},
    {"epoch": 0.5952380952380952, "grad_norm": 1.0804853439331055, "learning_rate": 3e-05, "loss": 0.3975, "step": 250},
    {"epoch": 0.5976190476190476, "grad_norm": 0.7743527293205261, "learning_rate": 3e-05, "loss": 0.4129, "step": 251},
    {"epoch": 0.6, "grad_norm": 0.7785153985023499, "learning_rate": 3e-05, "loss": 0.4027, "step": 252},
    {"epoch": 0.6023809523809524, "grad_norm": 1.36514413356781, "learning_rate": 3e-05, "loss": 0.5362, "step": 253},
    {"epoch": 0.6047619047619047, "grad_norm": 0.8658007979393005, "learning_rate": 3e-05, "loss": 0.4488, "step": 254},
    {"epoch": 0.6071428571428571, "grad_norm": 0.807852566242218, "learning_rate": 3e-05, "loss": 0.4065, "step": 255},
    {"epoch": 0.6095238095238096, "grad_norm": 1.2672406435012817, "learning_rate": 3e-05, "loss": 0.4587, "step": 256},
    {"epoch": 0.611904761904762, "grad_norm": 1.0440469980239868, "learning_rate": 3e-05, "loss": 0.4815, "step": 257},
    {"epoch": 0.6142857142857143, "grad_norm": 0.9778774380683899, "learning_rate": 3e-05, "loss": 0.385, "step": 258},
    {"epoch": 0.6166666666666667, "grad_norm": 0.8957841396331787, "learning_rate": 3e-05, "loss": 0.4405, "step": 259},
    {"epoch": 0.6190476190476191, "grad_norm": 0.9312807321548462, "learning_rate": 3e-05, "loss": 0.4902, "step": 260},
    {"epoch": 0.6214285714285714, "grad_norm": 0.6373504400253296, "learning_rate": 3e-05, "loss": 0.436, "step": 261},
    {"epoch": 0.6238095238095238, "grad_norm": 0.8105682134628296, "learning_rate": 3e-05, "loss": 0.4, "step": 262},
    {"epoch": 0.6261904761904762, "grad_norm": 1.1135553121566772, "learning_rate": 3e-05, "loss": 0.4624, "step": 263},
    {"epoch": 0.6285714285714286, "grad_norm": 0.8639890551567078, "learning_rate": 3e-05, "loss": 0.3969, "step": 264},
    {"epoch": 0.6309523809523809, "grad_norm": 1.8037925958633423, "learning_rate": 3e-05, "loss": 0.5693, "step": 265},
    {"epoch": 0.6333333333333333, "grad_norm": 0.6781933307647705, "learning_rate": 3e-05, "loss": 0.4913, "step": 266},
    {"epoch": 0.6357142857142857, "grad_norm": 0.8383111953735352, "learning_rate": 3e-05, "loss": 0.3797, "step": 267},
    {"epoch": 0.638095238095238, "grad_norm": 0.7477337718009949, "learning_rate": 3e-05, "loss": 0.4539, "step": 268},
    {"epoch": 0.6404761904761904, "grad_norm": 0.7518517971038818, "learning_rate": 3e-05, "loss": 0.445, "step": 269},
    {"epoch": 0.6428571428571429, "grad_norm": 0.9350783824920654, "learning_rate": 3e-05, "loss": 0.4111, "step": 270},
    {"epoch": 0.6452380952380953, "grad_norm": 0.7175120115280151, "learning_rate": 3e-05, "loss": 0.3893, "step": 271},
    {"epoch": 0.6476190476190476, "grad_norm": 0.9294947981834412, "learning_rate": 3e-05, "loss": 0.4438, "step": 272},
    {"epoch": 0.65, "grad_norm": 0.9406439065933228, "learning_rate": 3e-05, "loss": 0.4953, "step": 273},
    {"epoch": 0.6523809523809524, "grad_norm": 0.9267202019691467, "learning_rate": 3e-05, "loss": 0.4601, "step": 274},
    {"epoch": 0.6547619047619048, "grad_norm": 0.9095003604888916, "learning_rate": 3e-05, "loss": 0.5238, "step": 275},
    {"epoch": 0.6571428571428571, "grad_norm": 1.1330962181091309, "learning_rate": 3e-05, "loss": 0.4776, "step": 276},
    {"epoch": 0.6595238095238095, "grad_norm": 0.8976531028747559, "learning_rate": 3e-05, "loss": 0.4945, "step": 277},
    {"epoch": 0.6619047619047619, "grad_norm": 1.398485541343689, "learning_rate": 3e-05, "loss": 0.4164, "step": 278},
    {"epoch": 0.6642857142857143, "grad_norm": 0.8878613710403442, "learning_rate": 3e-05, "loss": 0.4601, "step": 279},
    {"epoch": 0.6666666666666666, "grad_norm": 0.6003170609474182, "learning_rate": 3e-05, "loss": 0.3919, "step": 280},
    {"epoch": 0.669047619047619, "grad_norm": 0.6366980075836182, "learning_rate": 3e-05, "loss": 0.4499, "step": 281},
    {"epoch": 0.6714285714285714, "grad_norm": 1.0374082326889038, "learning_rate": 3e-05, "loss": 0.4571, "step": 282},
    {"epoch": 0.6738095238095239, "grad_norm": 1.0846024751663208, "learning_rate": 3e-05, "loss": 0.4331, "step": 283},
    {"epoch": 0.6761904761904762, "grad_norm": 1.9345874786376953, "learning_rate": 3e-05, "loss": 0.4802, "step": 284},
    {"epoch": 0.6785714285714286, "grad_norm": 0.9415948390960693, "learning_rate": 3e-05, "loss": 0.4953, "step": 285},
    {"epoch": 0.680952380952381, "grad_norm": 0.7957614660263062, "learning_rate": 3e-05, "loss": 0.4365, "step": 286},
    {"epoch": 0.6833333333333333, "grad_norm": 0.9216206669807434, "learning_rate": 3e-05, "loss": 0.4168, "step": 287},
    {"epoch": 0.6857142857142857, "grad_norm": 1.051074743270874, "learning_rate": 3e-05, "loss": 0.4889, "step": 288},
    {"epoch": 0.6880952380952381, "grad_norm": 0.7795323729515076, "learning_rate": 3e-05, "loss": 0.3523, "step": 289},
    {"epoch": 0.6904761904761905, "grad_norm": 0.8779550790786743, "learning_rate": 3e-05, "loss": 0.3868, "step": 290},
    {"epoch": 0.6928571428571428, "grad_norm": 0.8232953548431396, "learning_rate": 3e-05, "loss": 0.3555, "step": 291},
    {"epoch": 0.6952380952380952, "grad_norm": 1.3102864027023315, "learning_rate": 3e-05, "loss": 0.5208, "step": 292},
    {"epoch": 0.6976190476190476, "grad_norm": 0.6595520973205566, "learning_rate": 3e-05, "loss": 0.3986, "step": 293},
    {"epoch": 0.7, "grad_norm": 0.9423562288284302, "learning_rate": 3e-05, "loss": 0.4447, "step": 294},
    {"epoch": 0.7023809523809523, "grad_norm": 0.7358708381652832, "learning_rate": 3e-05, "loss": 0.4395, "step": 295},
    {"epoch": 0.7047619047619048, "grad_norm": 0.8931717276573181, "learning_rate": 3e-05, "loss": 0.4249, "step": 296},
    {"epoch": 0.7071428571428572, "grad_norm": 0.8583389520645142, "learning_rate": 3e-05, "loss": 0.4519, "step": 297},
    {"epoch": 0.7095238095238096, "grad_norm": 0.824382483959198, "learning_rate": 3e-05, "loss": 0.4011, "step": 298},
    {"epoch": 0.7119047619047619, "grad_norm": 0.7034823298454285, "learning_rate": 3e-05, "loss": 0.4816, "step": 299},
    {"epoch": 0.7142857142857143, "grad_norm": 0.692171573638916, "learning_rate": 3e-05, "loss": 0.4142, "step": 300},
    {"epoch": 0.7166666666666667, "grad_norm": 0.9514793753623962, "learning_rate": 3e-05, "loss": 0.4276, "step": 301},
    {"epoch": 0.719047619047619, "grad_norm": 0.7801012396812439, "learning_rate": 3e-05, "loss": 0.4587, "step": 302},
    {"epoch": 0.7214285714285714, "grad_norm": 0.7651001214981079, "learning_rate": 3e-05, "loss": 0.3756, "step": 303},
    {"epoch": 0.7238095238095238, "grad_norm": 0.7955180406570435, "learning_rate": 3e-05, "loss": 0.4012, "step": 304},
    {"epoch": 0.7261904761904762, "grad_norm": 1.8750542402267456, "learning_rate": 3e-05, "loss": 0.5779, "step": 305},
    {"epoch": 0.7285714285714285, "grad_norm": 1.1346620321273804, "learning_rate": 3e-05, "loss": 0.4389, "step": 306},
    {"epoch": 0.7309523809523809, "grad_norm": 0.8639288544654846, "learning_rate": 3e-05, "loss": 0.4534, "step": 307},
    {"epoch": 0.7333333333333333, "grad_norm": 0.8282825946807861, "learning_rate": 3e-05, "loss": 0.4218, "step": 308},
    {"epoch": 0.7357142857142858, "grad_norm": 0.7293280959129333, "learning_rate": 3e-05, "loss": 0.3725, "step": 309},
    {"epoch": 0.7380952380952381, "grad_norm": 0.747637927532196, "learning_rate": 3e-05, "loss": 0.4778, "step": 310},
    {"epoch": 0.7404761904761905, "grad_norm": 0.6695718765258789, "learning_rate": 3e-05, "loss": 0.432, "step": 311},
    {"epoch": 0.7428571428571429, "grad_norm": 1.0372726917266846, "learning_rate": 3e-05, "loss": 0.4589, "step": 312},
    {"epoch": 0.7452380952380953, "grad_norm": 0.7464996576309204, "learning_rate": 3e-05, "loss": 0.404, "step": 313},
    {"epoch": 0.7476190476190476, "grad_norm": 1.2544994354248047, "learning_rate": 3e-05, "loss": 0.5555, "step": 314},
    {"epoch": 0.75, "grad_norm": 0.6378808617591858, "learning_rate": 3e-05, "loss": 0.4026, "step": 315},
    {"epoch": 0.7523809523809524, "grad_norm": 0.8749919533729553, "learning_rate": 3e-05, "loss": 0.7496, "step": 316},
    {"epoch": 0.7547619047619047, "grad_norm": 0.6927523016929626, "learning_rate": 3e-05, "loss": 0.4058, "step": 317},
    {"epoch": 0.7571428571428571, "grad_norm": 1.0399421453475952, "learning_rate": 3e-05, "loss": 0.4785, "step": 318},
    {"epoch": 0.7595238095238095, "grad_norm": 0.8676436543464661, "learning_rate": 3e-05, "loss": 0.4339, "step": 319},
    {"epoch": 0.7619047619047619, "grad_norm": 0.9331671595573425, "learning_rate": 3e-05, "loss": 0.3896, "step": 320},
    {"epoch": 0.7642857142857142, "grad_norm": 0.8491724729537964, "learning_rate": 3e-05, "loss": 0.3904, "step": 321},
    {"epoch": 0.7666666666666667, "grad_norm": 0.6475184559822083, "learning_rate": 3e-05, "loss": 0.4101, "step": 322},
    {"epoch": 0.7690476190476191, "grad_norm": 1.3740770816802979, "learning_rate": 3e-05, "loss": 0.498, "step": 323},
    {"epoch": 0.7714285714285715, "grad_norm": 0.8867368102073669, "learning_rate": 3e-05, "loss": 0.4102, "step": 324},
    {"epoch": 0.7738095238095238, "grad_norm": 0.7927978038787842, "learning_rate": 3e-05, "loss": 0.3936, "step": 325},
    {"epoch": 0.7761904761904762, "grad_norm": 0.8019774556159973, "learning_rate": 3e-05, "loss": 0.402, "step": 326},
    {"epoch": 0.7785714285714286, "grad_norm": 1.3307939767837524, "learning_rate": 3e-05, "loss": 0.4611, "step": 327},
    {"epoch": 0.780952380952381, "grad_norm": 0.7460595965385437, "learning_rate": 3e-05, "loss": 0.4083, "step": 328},
    {"epoch": 0.7833333333333333, "grad_norm": 0.5957393646240234, "learning_rate": 3e-05, "loss": 0.3989, "step": 329},
    {"epoch": 0.7857142857142857, "grad_norm": 0.9565883278846741, "learning_rate": 3e-05, "loss": 0.4277, "step": 330},
    {"epoch": 0.7880952380952381, "grad_norm": 0.7315310835838318, "learning_rate": 3e-05, "loss": 0.5165, "step": 331},
    {"epoch": 0.7904761904761904, "grad_norm": 1.2769473791122437, "learning_rate": 3e-05, "loss": 0.42, "step": 332},
    {"epoch": 0.7928571428571428, "grad_norm": 0.7580127120018005, "learning_rate": 3e-05, "loss": 0.4258, "step": 333},
    {"epoch": 0.7952380952380952, "grad_norm": 0.7266448140144348, "learning_rate": 3e-05, "loss": 0.398, "step": 334},
    {"epoch": 0.7976190476190477, "grad_norm": 0.6106278896331787, "learning_rate": 3e-05, "loss": 0.4565, "step": 335},
    {"epoch": 0.8, "grad_norm": 1.2278895378112793, "learning_rate": 3e-05, "loss": 0.5243, "step": 336},
    {"epoch": 0.8023809523809524, "grad_norm": 0.8991657495498657, "learning_rate": 3e-05, "loss": 0.389, "step": 337},
    {"epoch": 0.8047619047619048, "grad_norm": 0.9713463187217712, "learning_rate": 3e-05, "loss": 0.3885, "step": 338},
    {"epoch": 0.8071428571428572, "grad_norm": 1.4198249578475952, "learning_rate": 3e-05, "loss": 0.433, "step": 339},
    {"epoch": 0.8095238095238095, "grad_norm": 0.5804428458213806, "learning_rate": 3e-05, "loss": 0.3786, "step": 340},
    {"epoch": 0.8119047619047619, "grad_norm": 0.7774984240531921, "learning_rate": 3e-05, "loss": 0.4649, "step": 341},
    {"epoch": 0.8142857142857143, "grad_norm": 0.8857919573783875, "learning_rate": 3e-05, "loss": 0.4717, "step": 342},
    {"epoch": 0.8166666666666667, "grad_norm": 0.7703973054885864, "learning_rate": 3e-05, "loss": 0.428, "step": 343},
    {"epoch": 0.819047619047619, "grad_norm": 0.9551693797111511, "learning_rate": 3e-05, "loss": 0.4455, "step": 344},
    {"epoch": 0.8214285714285714, "grad_norm": 0.6841679215431213, "learning_rate": 3e-05, "loss": 0.4176, "step": 345},
    {"epoch": 0.8238095238095238, "grad_norm": 0.9121724963188171, "learning_rate": 3e-05, "loss": 0.4713, "step": 346},
    {"epoch": 0.8261904761904761, "grad_norm": 0.8475046753883362, "learning_rate": 3e-05, "loss": 0.3869, "step": 347},
    {"epoch": 0.8285714285714286, "grad_norm": 1.2067185640335083, "learning_rate": 3e-05, "loss": 0.4964, "step": 348},
    {"epoch": 0.830952380952381, "grad_norm": 0.7562458515167236, "learning_rate": 3e-05, "loss": 0.4228, "step": 349},
    {"epoch": 0.8333333333333334, "grad_norm": 0.6852816939353943, "learning_rate": 3e-05, "loss": 0.3723, "step": 350},
    {"epoch": 0.8357142857142857, "grad_norm": 0.6030831336975098, "learning_rate": 3e-05, "loss": 0.5053, "step": 351},
    {"epoch": 0.8380952380952381, "grad_norm": 1.0555213689804077, "learning_rate": 3e-05, "loss": 0.4575, "step": 352},
    {"epoch": 0.8404761904761905, "grad_norm": 0.6861211657524109, "learning_rate": 3e-05, "loss": 0.3885, "step": 353},
    {"epoch": 0.8428571428571429, "grad_norm": 0.9972875714302063, "learning_rate": 3e-05, "loss": 0.438, "step": 354},
    {"epoch": 0.8452380952380952, "grad_norm": 0.7220526933670044, "learning_rate": 3e-05, "loss": 0.4236, "step": 355},
    {"epoch": 0.8476190476190476, "grad_norm": 0.5923830270767212, "learning_rate": 3e-05, "loss": 0.3371, "step": 356},
    {"epoch": 0.85, "grad_norm": 0.9891591668128967, "learning_rate": 3e-05, "loss": 0.4189, "step": 357},
    {"epoch": 0.8523809523809524, "grad_norm": 0.8928924798965454, "learning_rate": 3e-05, "loss": 0.4261, "step": 358},
    {"epoch": 0.8547619047619047, "grad_norm": 0.6894112229347229, "learning_rate": 3e-05, "loss": 0.4658, "step": 359},
    {"epoch": 0.8571428571428571, "grad_norm": 0.7407474517822266, "learning_rate": 3e-05, "loss": 0.4619, "step": 360},
    {"epoch": 0.8595238095238096, "grad_norm": 0.6776091456413269, "learning_rate": 3e-05, "loss": 0.4396, "step": 361},
    {"epoch": 0.861904761904762, "grad_norm": 1.4972211122512817, "learning_rate": 3e-05, "loss": 0.4552, "step": 362},
    {"epoch": 0.8642857142857143, "grad_norm": 1.076313853263855, "learning_rate": 3e-05, "loss": 0.4211, "step": 363},
    {"epoch": 0.8666666666666667, "grad_norm": 0.8312737345695496, "learning_rate": 3e-05, "loss": 0.3821, "step": 364},
    {"epoch": 0.8690476190476191, "grad_norm": 0.6959190368652344, "learning_rate": 3e-05, "loss": 0.4649, "step": 365},
    {"epoch": 0.8714285714285714, "grad_norm": 0.9157832264900208, "learning_rate": 3e-05, "loss": 0.4405, "step": 366},
    {"epoch": 0.8738095238095238, "grad_norm": 1.097048282623291, "learning_rate": 3e-05, "loss": 0.4014, "step": 367},
    {"epoch": 0.8761904761904762, "grad_norm": 0.7263518571853638, "learning_rate": 3e-05, "loss": 0.3528, "step": 368},
    {"epoch": 0.8785714285714286, "grad_norm": 1.4131529331207275, "learning_rate": 3e-05, "loss": 0.5023, "step": 369},
    {"epoch": 0.8809523809523809, "grad_norm": 1.0101478099822998, "learning_rate": 3e-05, "loss": 0.4401, "step": 370},
    {"epoch": 0.8833333333333333, "grad_norm": 1.016892433166504, "learning_rate": 3e-05, "loss": 0.4605, "step": 371},
    {"epoch": 0.8857142857142857, "grad_norm": 1.1927547454833984, "learning_rate": 3e-05, "loss": 0.4652, "step": 372},
    {"epoch": 0.888095238095238, "grad_norm": 0.7094401717185974, "learning_rate": 3e-05, "loss": 0.4653, "step": 373},
    {"epoch": 0.8904761904761904, "grad_norm": 0.9114309549331665, "learning_rate": 3e-05, "loss": 0.4095, "step": 374},
    {"epoch": 0.8928571428571429, "grad_norm": 0.7444086074829102, "learning_rate": 3e-05, "loss": 0.4333, "step": 375},
    {"epoch": 0.8952380952380953, "grad_norm": 0.8845041990280151,
| "learning_rate": 3e-05, | |
| "loss": 0.5979, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.8976190476190476, | |
| "grad_norm": 1.1072129011154175, | |
| "learning_rate": 3e-05, | |
| "loss": 0.5131, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "grad_norm": 1.3130736351013184, | |
| "learning_rate": 3e-05, | |
| "loss": 0.5109, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.9023809523809524, | |
| "grad_norm": 0.8190350532531738, | |
| "learning_rate": 3e-05, | |
| "loss": 0.427, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.9047619047619048, | |
| "grad_norm": 0.7488160729408264, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3939, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.9071428571428571, | |
| "grad_norm": 1.0414373874664307, | |
| "learning_rate": 3e-05, | |
| "loss": 0.391, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.9095238095238095, | |
| "grad_norm": 1.3301194906234741, | |
| "learning_rate": 3e-05, | |
| "loss": 0.5093, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.9119047619047619, | |
| "grad_norm": 0.9577381610870361, | |
| "learning_rate": 3e-05, | |
| "loss": 0.37, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.9142857142857143, | |
| "grad_norm": 1.357572078704834, | |
| "learning_rate": 3e-05, | |
| "loss": 0.466, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.9166666666666666, | |
| "grad_norm": 0.7440205812454224, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4536, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.919047619047619, | |
| "grad_norm": 0.7036682367324829, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4357, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.9214285714285714, | |
| "grad_norm": 1.2188128232955933, | |
| "learning_rate": 3e-05, | |
| "loss": 0.409, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.9238095238095239, | |
| "grad_norm": 0.5263180732727051, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3951, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.9261904761904762, | |
| "grad_norm": 0.6690953373908997, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3835, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.9285714285714286, | |
| "grad_norm": 0.9960648417472839, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4646, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.930952380952381, | |
| "grad_norm": 1.1153391599655151, | |
| "learning_rate": 3e-05, | |
| "loss": 0.5308, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.9333333333333333, | |
| "grad_norm": 0.501700222492218, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3454, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.9357142857142857, | |
| "grad_norm": 0.760089099407196, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3501, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.9380952380952381, | |
| "grad_norm": 0.9005244970321655, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4264, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.9404761904761905, | |
| "grad_norm": 0.9984633922576904, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3715, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.9428571428571428, | |
| "grad_norm": 0.9046674370765686, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4708, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.9452380952380952, | |
| "grad_norm": 1.6578477621078491, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4625, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.9476190476190476, | |
| "grad_norm": 0.7396510243415833, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3877, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "grad_norm": 0.679201066493988, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4501, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.9523809523809523, | |
| "grad_norm": 0.8374237418174744, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3419, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.9547619047619048, | |
| "grad_norm": 0.8782289624214172, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4413, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.9571428571428572, | |
| "grad_norm": 1.0352013111114502, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4878, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.9595238095238096, | |
| "grad_norm": 0.6308469176292419, | |
| "learning_rate": 3e-05, | |
| "loss": 0.386, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.9619047619047619, | |
| "grad_norm": 1.0782958269119263, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4223, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.9642857142857143, | |
| "grad_norm": 2.6651995182037354, | |
| "learning_rate": 3e-05, | |
| "loss": 0.7216, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.9666666666666667, | |
| "grad_norm": 0.7647043466567993, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4042, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.969047619047619, | |
| "grad_norm": 0.6673374176025391, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4288, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.9714285714285714, | |
| "grad_norm": 0.6782795786857605, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3983, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.9738095238095238, | |
| "grad_norm": 0.7260273694992065, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4023, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.9761904761904762, | |
| "grad_norm": 0.8369085788726807, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3733, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.9785714285714285, | |
| "grad_norm": 0.6836142539978027, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3972, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.9809523809523809, | |
| "grad_norm": 1.2415976524353027, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4806, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.9833333333333333, | |
| "grad_norm": 0.9629127979278564, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4785, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.9857142857142858, | |
| "grad_norm": 0.8129779696464539, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4583, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.9880952380952381, | |
| "grad_norm": 0.8651249408721924, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4687, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.9904761904761905, | |
| "grad_norm": 0.6915225982666016, | |
| "learning_rate": 3e-05, | |
| "loss": 0.414, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.9928571428571429, | |
| "grad_norm": 1.0824182033538818, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3789, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.9952380952380953, | |
| "grad_norm": 0.6016390919685364, | |
| "learning_rate": 3e-05, | |
| "loss": 0.3457, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.9976190476190476, | |
| "grad_norm": 1.211536169052124, | |
| "learning_rate": 3e-05, | |
| "loss": 0.5893, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 0.9525183439254761, | |
| "learning_rate": 3e-05, | |
| "loss": 0.4137, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "step": 420, | |
| "total_flos": 1.7662621293477888e+17, | |
| "train_loss": 0.45642305286157697, | |
| "train_runtime": 1778.0298, | |
| "train_samples_per_second": 1.886, | |
| "train_steps_per_second": 0.236 | |
| } | |
| ], | |
| "logging_steps": 1.0, | |
| "max_steps": 420, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 100, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.7662621293477888e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |