{
  "best_metric": 1.0,
  "best_model_checkpoint": "test_long_tokens/checkpoint-70",
  "epoch": 14.285714285714286,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 18.59529685974121,
      "learning_rate": 5e-06,
      "loss": 1.637,
      "step": 1
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 20.817951202392578,
      "learning_rate": 1e-05,
      "loss": 1.6372,
      "step": 2
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 19.524600982666016,
      "learning_rate": 1.5e-05,
      "loss": 1.4306,
      "step": 3
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 15.0996675491333,
      "learning_rate": 2e-05,
      "loss": 1.2657,
      "step": 4
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 12.507349967956543,
      "learning_rate": 2.5e-05,
      "loss": 0.8948,
      "step": 5
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 8.167460441589355,
      "learning_rate": 3e-05,
      "loss": 0.5404,
      "step": 6
    },
    {
      "epoch": 1.0,
      "grad_norm": 5.33146858215332,
      "learning_rate": 3.5e-05,
      "loss": 0.5374,
      "step": 7
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8645276292335116,
      "eval_f1": 0.0,
      "eval_loss": 0.7161440253257751,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 0.2378,
      "eval_samples_per_second": 12.618,
      "eval_steps_per_second": 12.618,
      "step": 7
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 2.557964324951172,
      "learning_rate": 4e-05,
      "loss": 0.2147,
      "step": 8
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 4.4479289054870605,
      "learning_rate": 4.5e-05,
      "loss": 0.5833,
      "step": 9
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 5.119785308837891,
      "learning_rate": 5e-05,
      "loss": 0.6062,
      "step": 10
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 3.064371347427368,
      "learning_rate": 4.9444444444444446e-05,
      "loss": 0.451,
      "step": 11
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 1.8660037517547607,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.3188,
      "step": 12
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 2.083087205886841,
      "learning_rate": 4.8333333333333334e-05,
      "loss": 0.2287,
      "step": 13
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.0547335147857666,
      "learning_rate": 4.7777777777777784e-05,
      "loss": 0.3423,
      "step": 14
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8645276292335116,
      "eval_f1": 0.0,
      "eval_loss": 0.459845632314682,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 0.2226,
      "eval_samples_per_second": 13.474,
      "eval_steps_per_second": 13.474,
      "step": 14
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 2.206693410873413,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.2449,
      "step": 15
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.9722664952278137,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.1875,
      "step": 16
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 1.500380039215088,
      "learning_rate": 4.6111111111111115e-05,
      "loss": 0.1429,
      "step": 17
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 1.9801063537597656,
      "learning_rate": 4.555555555555556e-05,
      "loss": 0.1752,
      "step": 18
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 2.65030837059021,
      "learning_rate": 4.5e-05,
      "loss": 0.0763,
      "step": 19
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 3.4686992168426514,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.168,
      "step": 20
    },
    {
      "epoch": 3.0,
      "grad_norm": 6.0364298820495605,
      "learning_rate": 4.388888888888889e-05,
      "loss": 0.1813,
      "step": 21
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.966131907308378,
      "eval_f1": 0.9019607843137256,
      "eval_loss": 0.14405852556228638,
      "eval_precision": 0.8846153846153846,
      "eval_recall": 0.92,
      "eval_runtime": 0.2168,
      "eval_samples_per_second": 13.839,
      "eval_steps_per_second": 13.839,
      "step": 21
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 0.7663143873214722,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.0552,
      "step": 22
    },
    {
      "epoch": 3.2857142857142856,
      "grad_norm": 1.2191542387008667,
      "learning_rate": 4.277777777777778e-05,
      "loss": 0.1005,
      "step": 23
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 7.446482181549072,
      "learning_rate": 4.222222222222222e-05,
      "loss": 0.0559,
      "step": 24
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 1.18604576587677,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.0494,
      "step": 25
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 0.6873874664306641,
      "learning_rate": 4.111111111111111e-05,
      "loss": 0.0533,
      "step": 26
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 2.966907024383545,
      "learning_rate": 4.055555555555556e-05,
      "loss": 0.0625,
      "step": 27
    },
    {
      "epoch": 4.0,
      "grad_norm": 3.066542863845825,
      "learning_rate": 4e-05,
      "loss": 0.0734,
      "step": 28
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9732620320855615,
      "eval_f1": 0.8000000000000002,
      "eval_loss": 0.13226450979709625,
      "eval_precision": 0.8,
      "eval_recall": 0.8,
      "eval_runtime": 0.2294,
      "eval_samples_per_second": 13.077,
      "eval_steps_per_second": 13.077,
      "step": 28
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 0.8713329434394836,
      "learning_rate": 3.944444444444445e-05,
      "loss": 0.0673,
      "step": 29
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 0.306024968624115,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.0146,
      "step": 30
    },
    {
      "epoch": 4.428571428571429,
      "grad_norm": 0.3102547526359558,
      "learning_rate": 3.8333333333333334e-05,
      "loss": 0.0169,
      "step": 31
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 5.933145046234131,
      "learning_rate": 3.777777777777778e-05,
      "loss": 0.0753,
      "step": 32
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 1.7290794849395752,
      "learning_rate": 3.722222222222222e-05,
      "loss": 0.0668,
      "step": 33
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 8.295025825500488,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.0786,
      "step": 34
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.47509345412254333,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.0124,
      "step": 35
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9928698752228164,
      "eval_f1": 0.8235294117647058,
      "eval_loss": 0.0685623362660408,
      "eval_precision": 0.8076923076923077,
      "eval_recall": 0.84,
      "eval_runtime": 0.2188,
      "eval_samples_per_second": 13.709,
      "eval_steps_per_second": 13.709,
      "step": 35
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 1.3178348541259766,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.0303,
      "step": 36
    },
    {
      "epoch": 5.285714285714286,
      "grad_norm": 2.2010915279388428,
      "learning_rate": 3.5e-05,
      "loss": 0.045,
      "step": 37
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 1.1045790910720825,
      "learning_rate": 3.444444444444445e-05,
      "loss": 0.0145,
      "step": 38
    },
    {
      "epoch": 5.571428571428571,
      "grad_norm": 1.0724176168441772,
      "learning_rate": 3.388888888888889e-05,
      "loss": 0.0063,
      "step": 39
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 0.7634330987930298,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.0176,
      "step": 40
    },
    {
      "epoch": 5.857142857142857,
      "grad_norm": 0.48025190830230713,
      "learning_rate": 3.277777777777778e-05,
      "loss": 0.0228,
      "step": 41
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.21735815703868866,
      "learning_rate": 3.222222222222223e-05,
      "loss": 0.0034,
      "step": 42
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.983957219251337,
      "eval_f1": 0.7058823529411765,
      "eval_loss": 0.16237980127334595,
      "eval_precision": 0.6923076923076923,
      "eval_recall": 0.72,
      "eval_runtime": 0.2281,
      "eval_samples_per_second": 13.15,
      "eval_steps_per_second": 13.15,
      "step": 42
    },
    {
      "epoch": 6.142857142857143,
      "grad_norm": 1.1637978553771973,
      "learning_rate": 3.1666666666666666e-05,
      "loss": 0.0137,
      "step": 43
    },
    {
      "epoch": 6.285714285714286,
      "grad_norm": 0.21785947680473328,
      "learning_rate": 3.111111111111111e-05,
      "loss": 0.0039,
      "step": 44
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 20.79886245727539,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.1387,
      "step": 45
    },
    {
      "epoch": 6.571428571428571,
      "grad_norm": 7.047394752502441,
      "learning_rate": 3e-05,
      "loss": 0.019,
      "step": 46
    },
    {
      "epoch": 6.714285714285714,
      "grad_norm": 0.3814934194087982,
      "learning_rate": 2.9444444444444448e-05,
      "loss": 0.0059,
      "step": 47
    },
    {
      "epoch": 6.857142857142857,
      "grad_norm": 0.4642070233821869,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 0.005,
      "step": 48
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.06280206888914108,
      "learning_rate": 2.8333333333333335e-05,
      "loss": 0.0023,
      "step": 49
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9946524064171123,
      "eval_f1": 0.830188679245283,
      "eval_loss": 0.018056461587548256,
      "eval_precision": 0.7857142857142857,
      "eval_recall": 0.88,
      "eval_runtime": 0.2418,
      "eval_samples_per_second": 12.409,
      "eval_steps_per_second": 12.409,
      "step": 49
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 0.042585261166095734,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.002,
      "step": 50
    },
    {
      "epoch": 7.285714285714286,
      "grad_norm": 3.1652538776397705,
      "learning_rate": 2.7222222222222223e-05,
      "loss": 0.0085,
      "step": 51
    },
    {
      "epoch": 7.428571428571429,
      "grad_norm": 0.997791051864624,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.0154,
      "step": 52
    },
    {
      "epoch": 7.571428571428571,
      "grad_norm": 0.2884855568408966,
      "learning_rate": 2.6111111111111114e-05,
      "loss": 0.0021,
      "step": 53
    },
    {
      "epoch": 7.714285714285714,
      "grad_norm": 0.39060717821121216,
      "learning_rate": 2.5555555555555554e-05,
      "loss": 0.0026,
      "step": 54
    },
    {
      "epoch": 7.857142857142857,
      "grad_norm": 15.578678131103516,
      "learning_rate": 2.5e-05,
      "loss": 0.031,
      "step": 55
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.1004723384976387,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 0.0023,
      "step": 56
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9946524064171123,
      "eval_f1": 0.8627450980392156,
      "eval_loss": 0.054031386971473694,
      "eval_precision": 0.8461538461538461,
      "eval_recall": 0.88,
      "eval_runtime": 0.3115,
      "eval_samples_per_second": 9.631,
      "eval_steps_per_second": 9.631,
      "step": 56
    },
    {
      "epoch": 8.142857142857142,
      "grad_norm": 0.5730704665184021,
      "learning_rate": 2.3888888888888892e-05,
      "loss": 0.005,
      "step": 57
    },
    {
      "epoch": 8.285714285714286,
      "grad_norm": 0.08043497800827026,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.0023,
      "step": 58
    },
    {
      "epoch": 8.428571428571429,
      "grad_norm": 0.03008083440363407,
      "learning_rate": 2.277777777777778e-05,
      "loss": 0.0012,
      "step": 59
    },
    {
      "epoch": 8.571428571428571,
      "grad_norm": 0.03248002007603645,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.0008,
      "step": 60
    },
    {
      "epoch": 8.714285714285714,
      "grad_norm": 0.04838703200221062,
      "learning_rate": 2.1666666666666667e-05,
      "loss": 0.0013,
      "step": 61
    },
    {
      "epoch": 8.857142857142858,
      "grad_norm": 0.33141571283340454,
      "learning_rate": 2.111111111111111e-05,
      "loss": 0.0022,
      "step": 62
    },
    {
      "epoch": 9.0,
      "grad_norm": 1.7117937803268433,
      "learning_rate": 2.0555555555555555e-05,
      "loss": 0.0069,
      "step": 63
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9269162210338681,
      "eval_f1": 0.5,
      "eval_loss": 0.5584124326705933,
      "eval_precision": 0.5789473684210527,
      "eval_recall": 0.44,
      "eval_runtime": 0.2368,
      "eval_samples_per_second": 12.67,
      "eval_steps_per_second": 12.67,
      "step": 63
    },
    {
      "epoch": 9.142857142857142,
      "grad_norm": 0.3541106581687927,
      "learning_rate": 2e-05,
      "loss": 0.003,
      "step": 64
    },
    {
      "epoch": 9.285714285714286,
      "grad_norm": 6.2997145652771,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.0145,
      "step": 65
    },
    {
      "epoch": 9.428571428571429,
      "grad_norm": 3.1364083290100098,
      "learning_rate": 1.888888888888889e-05,
      "loss": 0.0043,
      "step": 66
    },
    {
      "epoch": 9.571428571428571,
      "grad_norm": 0.018303165212273598,
      "learning_rate": 1.8333333333333333e-05,
      "loss": 0.0007,
      "step": 67
    },
    {
      "epoch": 9.714285714285714,
      "grad_norm": 0.06226564571261406,
      "learning_rate": 1.777777777777778e-05,
      "loss": 0.0011,
      "step": 68
    },
    {
      "epoch": 9.857142857142858,
      "grad_norm": 0.18134748935699463,
      "learning_rate": 1.7222222222222224e-05,
      "loss": 0.0014,
      "step": 69
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.013242754153907299,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.0006,
      "step": 70
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.004223768133670092,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 0.2664,
      "eval_samples_per_second": 11.263,
      "eval_steps_per_second": 11.263,
      "step": 70
    },
    {
      "epoch": 10.142857142857142,
      "grad_norm": 0.5124610662460327,
      "learning_rate": 1.6111111111111115e-05,
      "loss": 0.0017,
      "step": 71
    },
    {
      "epoch": 10.285714285714286,
      "grad_norm": 0.014037979766726494,
      "learning_rate": 1.5555555555555555e-05,
      "loss": 0.0008,
      "step": 72
    },
    {
      "epoch": 10.428571428571429,
      "grad_norm": 0.020040445029735565,
      "learning_rate": 1.5e-05,
      "loss": 0.001,
      "step": 73
    },
    {
      "epoch": 10.571428571428571,
      "grad_norm": 0.0177422147244215,
      "learning_rate": 1.4444444444444444e-05,
      "loss": 0.0007,
      "step": 74
    },
    {
      "epoch": 10.714285714285714,
      "grad_norm": 0.02175714634358883,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.0011,
      "step": 75
    },
    {
      "epoch": 10.857142857142858,
      "grad_norm": 0.016904808580875397,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.0009,
      "step": 76
    },
    {
      "epoch": 11.0,
      "grad_norm": 0.7300955653190613,
      "learning_rate": 1.2777777777777777e-05,
      "loss": 0.0026,
      "step": 77
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.0016724281013011932,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 0.2244,
      "eval_samples_per_second": 13.369,
      "eval_steps_per_second": 13.369,
      "step": 77
    },
    {
      "epoch": 11.142857142857142,
      "grad_norm": 0.01708046905696392,
      "learning_rate": 1.2222222222222222e-05,
      "loss": 0.001,
      "step": 78
    },
    {
      "epoch": 11.285714285714286,
      "grad_norm": 0.025557199493050575,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 0.0011,
      "step": 79
    },
    {
      "epoch": 11.428571428571429,
      "grad_norm": 0.015760347247123718,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.0008,
      "step": 80
    },
    {
      "epoch": 11.571428571428571,
      "grad_norm": 0.4625304341316223,
      "learning_rate": 1.0555555555555555e-05,
      "loss": 0.002,
      "step": 81
    },
    {
      "epoch": 11.714285714285714,
      "grad_norm": 0.02315388433635235,
      "learning_rate": 1e-05,
      "loss": 0.0009,
      "step": 82
    },
    {
      "epoch": 11.857142857142858,
      "grad_norm": 0.013032798655331135,
      "learning_rate": 9.444444444444445e-06,
      "loss": 0.0006,
      "step": 83
    },
    {
      "epoch": 12.0,
      "grad_norm": 0.01681060716509819,
      "learning_rate": 8.88888888888889e-06,
      "loss": 0.0007,
      "step": 84
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9982174688057041,
      "eval_f1": 0.9411764705882353,
      "eval_loss": 0.006076255813241005,
      "eval_precision": 0.9230769230769231,
      "eval_recall": 0.96,
      "eval_runtime": 0.2313,
      "eval_samples_per_second": 12.968,
      "eval_steps_per_second": 12.968,
      "step": 84
    },
    {
      "epoch": 12.142857142857142,
      "grad_norm": 0.021738462150096893,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0012,
      "step": 85
    },
    {
      "epoch": 12.285714285714286,
      "grad_norm": 0.07413913309574127,
      "learning_rate": 7.777777777777777e-06,
      "loss": 0.001,
      "step": 86
    },
    {
      "epoch": 12.428571428571429,
      "grad_norm": 0.015694081783294678,
      "learning_rate": 7.222222222222222e-06,
      "loss": 0.0007,
      "step": 87
    },
    {
      "epoch": 12.571428571428571,
      "grad_norm": 0.07455814629793167,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.0011,
      "step": 88
    },
    {
      "epoch": 12.714285714285714,
      "grad_norm": 2.512777805328369,
      "learning_rate": 6.111111111111111e-06,
      "loss": 0.002,
      "step": 89
    },
    {
      "epoch": 12.857142857142858,
      "grad_norm": 0.020945778116583824,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.0008,
      "step": 90
    },
    {
      "epoch": 13.0,
      "grad_norm": 0.014726142399013042,
      "learning_rate": 5e-06,
      "loss": 0.0006,
      "step": 91
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9982174688057041,
      "eval_f1": 0.9411764705882353,
      "eval_loss": 0.010821976698935032,
      "eval_precision": 0.9230769230769231,
      "eval_recall": 0.96,
      "eval_runtime": 0.2328,
      "eval_samples_per_second": 12.888,
      "eval_steps_per_second": 12.888,
      "step": 91
    },
    {
      "epoch": 13.142857142857142,
      "grad_norm": 0.022652102634310722,
      "learning_rate": 4.444444444444445e-06,
      "loss": 0.0007,
      "step": 92
    },
    {
      "epoch": 13.285714285714286,
      "grad_norm": 0.0192624032497406,
      "learning_rate": 3.888888888888889e-06,
      "loss": 0.0009,
      "step": 93
    },
    {
      "epoch": 13.428571428571429,
      "grad_norm": 0.03646668419241905,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0009,
      "step": 94
    },
    {
      "epoch": 13.571428571428571,
      "grad_norm": 0.02297368273139,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.0006,
      "step": 95
    },
    {
      "epoch": 13.714285714285714,
      "grad_norm": 0.013909080997109413,
      "learning_rate": 2.2222222222222225e-06,
      "loss": 0.0006,
      "step": 96
    },
    {
      "epoch": 13.857142857142858,
      "grad_norm": 0.021894708275794983,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.0008,
      "step": 97
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.019053662195801735,
      "learning_rate": 1.1111111111111112e-06,
      "loss": 0.0011,
      "step": 98
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9982174688057041,
      "eval_f1": 0.9411764705882353,
      "eval_loss": 0.011927765794098377,
      "eval_precision": 0.9230769230769231,
      "eval_recall": 0.96,
      "eval_runtime": 0.2367,
      "eval_samples_per_second": 12.675,
      "eval_steps_per_second": 12.675,
      "step": 98
    },
    {
      "epoch": 14.142857142857142,
      "grad_norm": 0.015299996361136436,
      "learning_rate": 5.555555555555556e-07,
      "loss": 0.0007,
      "step": 99
    },
    {
      "epoch": 14.285714285714286,
      "grad_norm": 0.06155708432197571,
      "learning_rate": 0.0,
      "loss": 0.0009,
      "step": 100
    },
    {
      "epoch": 14.285714285714286,
      "eval_accuracy": 0.9982174688057041,
      "eval_f1": 0.9411764705882353,
      "eval_loss": 0.011938165873289108,
      "eval_precision": 0.9230769230769231,
      "eval_recall": 0.96,
      "eval_runtime": 0.459,
      "eval_samples_per_second": 6.535,
      "eval_steps_per_second": 6.535,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 52719651225600.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|