{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 38601,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03885909691458771,
      "grad_norm": 0.45540618896484375,
      "learning_rate": 1.9740939353902752e-05,
      "loss": 0.4641,
      "step": 500
    },
    {
      "epoch": 0.07771819382917541,
      "grad_norm": 0.05885039269924164,
      "learning_rate": 1.94818787078055e-05,
      "loss": 0.0073,
      "step": 1000
    },
    {
      "epoch": 0.11657729074376312,
      "grad_norm": 0.0643157884478569,
      "learning_rate": 1.922281806170825e-05,
      "loss": 0.0066,
      "step": 1500
    },
    {
      "epoch": 0.15543638765835083,
      "grad_norm": 0.025738418102264404,
      "learning_rate": 1.8963757415611e-05,
      "loss": 0.0055,
      "step": 2000
    },
    {
      "epoch": 0.19429548457293852,
      "grad_norm": 0.006490091327577829,
      "learning_rate": 1.8704696769513745e-05,
      "loss": 0.0018,
      "step": 2500
    },
    {
      "epoch": 0.23315458148752624,
      "grad_norm": 0.003259174292907119,
      "learning_rate": 1.8445636123416492e-05,
      "loss": 0.0017,
      "step": 3000
    },
    {
      "epoch": 0.27201367840211393,
      "grad_norm": 0.002379123354330659,
      "learning_rate": 1.8186575477319242e-05,
      "loss": 0.0004,
      "step": 3500
    },
    {
      "epoch": 0.31087277531670165,
      "grad_norm": 0.0013682579156011343,
      "learning_rate": 1.7927514831221992e-05,
      "loss": 0.0005,
      "step": 4000
    },
    {
      "epoch": 0.3497318722312893,
      "grad_norm": 0.003730255179107189,
      "learning_rate": 1.766845418512474e-05,
      "loss": 0.0061,
      "step": 4500
    },
    {
      "epoch": 0.38859096914587704,
      "grad_norm": 0.0010942475637421012,
      "learning_rate": 1.740939353902749e-05,
      "loss": 0.0015,
      "step": 5000
    },
    {
      "epoch": 0.42745006606046476,
      "grad_norm": 0.0026132045313715935,
      "learning_rate": 1.7150332892930236e-05,
      "loss": 0.002,
      "step": 5500
    },
    {
      "epoch": 0.4663091629750525,
      "grad_norm": 0.0005335228051990271,
      "learning_rate": 1.6891272246832986e-05,
      "loss": 0.0008,
      "step": 6000
    },
    {
      "epoch": 0.5051682598896402,
      "grad_norm": 0.0023255019914358854,
      "learning_rate": 1.6632211600735732e-05,
      "loss": 0.0009,
      "step": 6500
    },
    {
      "epoch": 0.5440273568042279,
      "grad_norm": 0.0013378489529713988,
      "learning_rate": 1.6373150954638482e-05,
      "loss": 0.0019,
      "step": 7000
    },
    {
      "epoch": 0.5828864537188155,
      "grad_norm": 0.0021079631987959146,
      "learning_rate": 1.611409030854123e-05,
      "loss": 0.0099,
      "step": 7500
    },
    {
      "epoch": 0.6217455506334033,
      "grad_norm": 0.00038598832907155156,
      "learning_rate": 1.585502966244398e-05,
      "loss": 0.0001,
      "step": 8000
    },
    {
      "epoch": 0.660604647547991,
      "grad_norm": 0.0005731793353334069,
      "learning_rate": 1.559596901634673e-05,
      "loss": 0.0013,
      "step": 8500
    },
    {
      "epoch": 0.6994637444625786,
      "grad_norm": 0.00023792145657353103,
      "learning_rate": 1.5336908370249476e-05,
      "loss": 0.0001,
      "step": 9000
    },
    {
      "epoch": 0.7383228413771664,
      "grad_norm": 0.0007500049541704357,
      "learning_rate": 1.5077847724152226e-05,
      "loss": 0.0025,
      "step": 9500
    },
    {
      "epoch": 0.7771819382917541,
      "grad_norm": 0.00044937318307347596,
      "learning_rate": 1.4818787078054974e-05,
      "loss": 0.0026,
      "step": 10000
    },
    {
      "epoch": 0.8160410352063419,
      "grad_norm": 0.0006425898754969239,
      "learning_rate": 1.4559726431957721e-05,
      "loss": 0.0006,
      "step": 10500
    },
    {
      "epoch": 0.8549001321209295,
      "grad_norm": 0.0006235554465092719,
      "learning_rate": 1.430066578586047e-05,
      "loss": 0.0012,
      "step": 11000
    },
    {
      "epoch": 0.8937592290355172,
      "grad_norm": 0.0004008706600870937,
      "learning_rate": 1.404160513976322e-05,
      "loss": 0.0017,
      "step": 11500
    },
    {
      "epoch": 0.932618325950105,
      "grad_norm": 0.0004875475133303553,
      "learning_rate": 1.3782544493665968e-05,
      "loss": 0.0001,
      "step": 12000
    },
    {
      "epoch": 0.9714774228646926,
      "grad_norm": 0.0015087984502315521,
      "learning_rate": 1.3523483847568716e-05,
      "loss": 0.0011,
      "step": 12500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9999115207373271,
      "eval_f1": 0.9998912136925701,
      "eval_loss": 0.0008539558621123433,
      "eval_precision": 0.9999032998912124,
      "eval_recall": 0.9998791277861045,
      "eval_runtime": 21.9409,
      "eval_samples_per_second": 930.682,
      "eval_steps_per_second": 116.358,
      "step": 12867
    },
    {
      "epoch": 1.0103365197792804,
      "grad_norm": 0.0007504357490688562,
      "learning_rate": 1.3264423201471466e-05,
      "loss": 0.0012,
      "step": 13000
    },
    {
      "epoch": 1.049195616693868,
      "grad_norm": 9.457436681259423e-05,
      "learning_rate": 1.3005362555374215e-05,
      "loss": 0.0001,
      "step": 13500
    },
    {
      "epoch": 1.0880547136084557,
      "grad_norm": 0.0005767066031694412,
      "learning_rate": 1.2746301909276963e-05,
      "loss": 0.0006,
      "step": 14000
    },
    {
      "epoch": 1.1269138105230434,
      "grad_norm": 0.0004582173132803291,
      "learning_rate": 1.2487241263179713e-05,
      "loss": 0.0045,
      "step": 14500
    },
    {
      "epoch": 1.165772907437631,
      "grad_norm": 0.0006144808721728623,
      "learning_rate": 1.222818061708246e-05,
      "loss": 0.0053,
      "step": 15000
    },
    {
      "epoch": 1.2046320043522187,
      "grad_norm": 0.0002801430528052151,
      "learning_rate": 1.1969119970985208e-05,
      "loss": 0.0005,
      "step": 15500
    },
    {
      "epoch": 1.2434911012668066,
      "grad_norm": 0.001027058344334364,
      "learning_rate": 1.1710059324887957e-05,
      "loss": 0.002,
      "step": 16000
    },
    {
      "epoch": 1.2823501981813943,
      "grad_norm": 0.00030287433764897287,
      "learning_rate": 1.1450998678790705e-05,
      "loss": 0.0001,
      "step": 16500
    },
    {
      "epoch": 1.321209295095982,
      "grad_norm": 9.348896855954081e-05,
      "learning_rate": 1.1191938032693455e-05,
      "loss": 0.0,
      "step": 17000
    },
    {
      "epoch": 1.3600683920105696,
      "grad_norm": 4.3804615415865555e-05,
      "learning_rate": 1.0932877386596204e-05,
      "loss": 0.0,
      "step": 17500
    },
    {
      "epoch": 1.3989274889251573,
      "grad_norm": 5.0469727284507826e-05,
      "learning_rate": 1.0673816740498952e-05,
      "loss": 0.0012,
      "step": 18000
    },
    {
      "epoch": 1.4377865858397452,
      "grad_norm": 8.691708353580907e-05,
      "learning_rate": 1.0414756094401702e-05,
      "loss": 0.0004,
      "step": 18500
    },
    {
      "epoch": 1.4766456827543328,
      "grad_norm": 6.273793405853212e-05,
      "learning_rate": 1.0155695448304449e-05,
      "loss": 0.0005,
      "step": 19000
    },
    {
      "epoch": 1.5155047796689205,
      "grad_norm": 9.822064748732373e-05,
      "learning_rate": 9.896634802207197e-06,
      "loss": 0.0,
      "step": 19500
    },
    {
      "epoch": 1.5543638765835082,
      "grad_norm": 0.00012862567382398993,
      "learning_rate": 9.637574156109945e-06,
      "loss": 0.0005,
      "step": 20000
    },
    {
      "epoch": 1.5932229734980958,
      "grad_norm": 0.013405167497694492,
      "learning_rate": 9.378513510012694e-06,
      "loss": 0.0005,
      "step": 20500
    },
    {
      "epoch": 1.6320820704126837,
      "grad_norm": 6.82063982822001e-05,
      "learning_rate": 9.119452863915444e-06,
      "loss": 0.0,
      "step": 21000
    },
    {
      "epoch": 1.6709411673272712,
      "grad_norm": 0.0001184305947390385,
      "learning_rate": 8.860392217818192e-06,
      "loss": 0.0006,
      "step": 21500
    },
    {
      "epoch": 1.709800264241859,
      "grad_norm": 0.0004617497615981847,
      "learning_rate": 8.60133157172094e-06,
      "loss": 0.0,
      "step": 22000
    },
    {
      "epoch": 1.7486593611564467,
      "grad_norm": 9.478950232733041e-05,
      "learning_rate": 8.342270925623689e-06,
      "loss": 0.0,
      "step": 22500
    },
    {
      "epoch": 1.7875184580710344,
      "grad_norm": 0.0005689120152965188,
      "learning_rate": 8.083210279526437e-06,
      "loss": 0.0013,
      "step": 23000
    },
    {
      "epoch": 1.8263775549856223,
      "grad_norm": 3.0777548090554774e-05,
      "learning_rate": 7.824149633429187e-06,
      "loss": 0.0002,
      "step": 23500
    },
    {
      "epoch": 1.8652366519002097,
      "grad_norm": 0.00029757359880022705,
      "learning_rate": 7.565088987331936e-06,
      "loss": 0.001,
      "step": 24000
    },
    {
      "epoch": 1.9040957488147976,
      "grad_norm": 0.0001594646309968084,
      "learning_rate": 7.306028341234683e-06,
      "loss": 0.0005,
      "step": 24500
    },
    {
      "epoch": 1.9429548457293853,
      "grad_norm": 3.8429996493505314e-05,
      "learning_rate": 7.046967695137432e-06,
      "loss": 0.0009,
      "step": 25000
    },
    {
      "epoch": 1.981813942643973,
      "grad_norm": 0.0016445047222077847,
      "learning_rate": 6.787907049040181e-06,
      "loss": 0.002,
      "step": 25500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9999188940092166,
      "eval_f1": 0.9998670389575854,
      "eval_loss": 0.0004210107435937971,
      "eval_precision": 0.9998791248640154,
      "eval_recall": 0.9998549533433254,
      "eval_runtime": 26.3407,
      "eval_samples_per_second": 775.225,
      "eval_steps_per_second": 96.922,
      "step": 25734
    },
    {
      "epoch": 2.020673039558561,
      "grad_norm": 0.010017353110015392,
      "learning_rate": 6.52884640294293e-06,
      "loss": 0.0015,
      "step": 26000
    },
    {
      "epoch": 2.0595321364731483,
      "grad_norm": 0.0025657187215983868,
      "learning_rate": 6.269785756845678e-06,
      "loss": 0.0028,
      "step": 26500
    },
    {
      "epoch": 2.098391233387736,
      "grad_norm": 0.00022906585945747793,
      "learning_rate": 6.010725110748426e-06,
      "loss": 0.0001,
      "step": 27000
    },
    {
      "epoch": 2.1372503303023236,
      "grad_norm": 6.71066518407315e-05,
      "learning_rate": 5.751664464651175e-06,
      "loss": 0.0006,
      "step": 27500
    },
    {
      "epoch": 2.1761094272169115,
      "grad_norm": 0.00021527826902456582,
      "learning_rate": 5.492603818553925e-06,
      "loss": 0.0,
      "step": 28000
    },
    {
      "epoch": 2.2149685241314994,
      "grad_norm": 9.48450033320114e-05,
      "learning_rate": 5.233543172456672e-06,
      "loss": 0.0005,
      "step": 28500
    },
    {
      "epoch": 2.253827621046087,
      "grad_norm": 0.00010121305240318179,
      "learning_rate": 4.9744825263594205e-06,
      "loss": 0.0,
      "step": 29000
    },
    {
      "epoch": 2.2926867179606747,
      "grad_norm": 8.369102579308674e-05,
      "learning_rate": 4.71542188026217e-06,
      "loss": 0.0,
      "step": 29500
    },
    {
      "epoch": 2.331545814875262,
      "grad_norm": 5.066412632004358e-05,
      "learning_rate": 4.456361234164918e-06,
      "loss": 0.0,
      "step": 30000
    },
    {
      "epoch": 2.37040491178985,
      "grad_norm": 0.00012291729217395186,
      "learning_rate": 4.197300588067667e-06,
      "loss": 0.0008,
      "step": 30500
    },
    {
      "epoch": 2.4092640087044375,
      "grad_norm": 0.00014957574603613466,
      "learning_rate": 3.938239941970416e-06,
      "loss": 0.0,
      "step": 31000
    },
    {
      "epoch": 2.4481231056190254,
      "grad_norm": 4.759750663652085e-05,
      "learning_rate": 3.679179295873164e-06,
      "loss": 0.0,
      "step": 31500
    },
    {
      "epoch": 2.4869822025336132,
      "grad_norm": 3.8031983422115445e-05,
      "learning_rate": 3.4201186497759125e-06,
      "loss": 0.0,
      "step": 32000
    },
    {
      "epoch": 2.5258412994482007,
      "grad_norm": 8.281264308607206e-05,
      "learning_rate": 3.1610580036786613e-06,
      "loss": 0.0,
      "step": 32500
    },
    {
      "epoch": 2.5647003963627886,
      "grad_norm": 8.475359209114686e-05,
      "learning_rate": 2.9019973575814097e-06,
      "loss": 0.0,
      "step": 33000
    },
    {
      "epoch": 2.603559493277376,
      "grad_norm": 8.979766425909474e-05,
      "learning_rate": 2.6429367114841585e-06,
      "loss": 0.0,
      "step": 33500
    },
    {
      "epoch": 2.642418590191964,
      "grad_norm": 0.0025826424825936556,
      "learning_rate": 2.3838760653869073e-06,
      "loss": 0.0007,
      "step": 34000
    },
    {
      "epoch": 2.681277687106552,
      "grad_norm": 8.839767542667687e-05,
      "learning_rate": 2.1248154192896557e-06,
      "loss": 0.0007,
      "step": 34500
    },
    {
      "epoch": 2.7201367840211392,
      "grad_norm": 3.709522934514098e-05,
      "learning_rate": 1.8657547731924045e-06,
      "loss": 0.0,
      "step": 35000
    },
    {
      "epoch": 2.758995880935727,
      "grad_norm": 0.00011620597069850191,
      "learning_rate": 1.6066941270951531e-06,
      "loss": 0.0005,
      "step": 35500
    },
    {
      "epoch": 2.7978549778503146,
      "grad_norm": 3.431947698118165e-05,
      "learning_rate": 1.3476334809979017e-06,
      "loss": 0.0,
      "step": 36000
    },
    {
      "epoch": 2.8367140747649024,
      "grad_norm": 8.798589260550216e-05,
      "learning_rate": 1.0885728349006503e-06,
      "loss": 0.0,
      "step": 36500
    },
    {
      "epoch": 2.8755731716794903,
      "grad_norm": 2.6514268029131927e-05,
      "learning_rate": 8.29512188803399e-07,
      "loss": 0.0,
      "step": 37000
    },
    {
      "epoch": 2.914432268594078,
      "grad_norm": 2.451765067235101e-05,
      "learning_rate": 5.704515427061476e-07,
      "loss": 0.0,
      "step": 37500
    },
    {
      "epoch": 2.9532913655086657,
      "grad_norm": 1.2271469131519552e-05,
      "learning_rate": 3.113908966088962e-07,
      "loss": 0.0,
      "step": 38000
    },
    {
      "epoch": 2.992150462423253,
      "grad_norm": 3.4994409361388534e-05,
      "learning_rate": 5.233025051164478e-08,
      "loss": 0.0005,
      "step": 38500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9999188940092166,
      "eval_f1": 0.9998549533433254,
      "eval_loss": 0.0004571246390696615,
      "eval_precision": 0.9998549533433254,
      "eval_recall": 0.9998549533433254,
      "eval_runtime": 40.6059,
      "eval_samples_per_second": 502.882,
      "eval_steps_per_second": 62.873,
      "step": 38601
    },
    {
      "epoch": 3.0,
      "step": 38601,
      "total_flos": 1954036863526950.0,
      "train_loss": 0.007192428305773462,
      "train_runtime": 2269.357,
      "train_samples_per_second": 136.071,
      "train_steps_per_second": 17.01
    }
  ],
  "logging_steps": 500,
  "max_steps": 38601,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1954036863526950.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}