{
  "best_metric": 0.8297455968688845,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-8/checkpoint-576",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 576,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 3.2297720909118652,
      "learning_rate": 0.0003356185870926135,
      "loss": 0.5529,
      "step": 96
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7602739726027398,
      "eval_f1": 0.7973531844499586,
      "eval_loss": 0.4773445129394531,
      "eval_precision": 0.6905444126074498,
      "eval_recall": 0.9432485322896281,
      "eval_runtime": 28.1534,
      "eval_samples_per_second": 36.301,
      "eval_steps_per_second": 1.137,
      "step": 96
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.981153130531311,
      "learning_rate": 0.00026849486967409075,
      "loss": 0.4542,
      "step": 192
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8199608610567515,
      "eval_f1": 0.8365896980461812,
      "eval_loss": 0.42821842432022095,
      "eval_precision": 0.7658536585365854,
      "eval_recall": 0.9217221135029354,
      "eval_runtime": 28.2265,
      "eval_samples_per_second": 36.207,
      "eval_steps_per_second": 1.134,
      "step": 192
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.941507339477539,
      "learning_rate": 0.0002013711522555681,
      "loss": 0.4191,
      "step": 288
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.821917808219178,
      "eval_f1": 0.8311688311688312,
      "eval_loss": 0.44406670331954956,
      "eval_precision": 0.7901234567901234,
      "eval_recall": 0.8767123287671232,
      "eval_runtime": 28.0469,
      "eval_samples_per_second": 36.439,
      "eval_steps_per_second": 1.141,
      "step": 288
    },
    {
      "epoch": 4.0,
      "grad_norm": 7.470360279083252,
      "learning_rate": 0.00013424743483704538,
      "loss": 0.4059,
      "step": 384
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8170254403131115,
      "eval_f1": 0.8186226964112512,
      "eval_loss": 0.4284485876560211,
      "eval_precision": 0.8115384615384615,
      "eval_recall": 0.8258317025440313,
      "eval_runtime": 28.1941,
      "eval_samples_per_second": 36.249,
      "eval_steps_per_second": 1.135,
      "step": 384
    },
    {
      "epoch": 5.0,
      "grad_norm": 2.5780177116394043,
      "learning_rate": 6.712371741852269e-05,
      "loss": 0.398,
      "step": 480
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8258317025440313,
      "eval_f1": 0.8378870673952642,
      "eval_loss": 0.4101215600967407,
      "eval_precision": 0.7836456558773425,
      "eval_recall": 0.9001956947162426,
      "eval_runtime": 28.0686,
      "eval_samples_per_second": 36.411,
      "eval_steps_per_second": 1.14,
      "step": 480
    },
    {
      "epoch": 6.0,
      "grad_norm": 4.741052150726318,
      "learning_rate": 0.0,
      "loss": 0.3876,
      "step": 576
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8297455968688845,
      "eval_f1": 0.8370786516853934,
      "eval_loss": 0.4097915291786194,
      "eval_precision": 0.8025134649910234,
      "eval_recall": 0.8747553816046967,
      "eval_runtime": 29.2803,
      "eval_samples_per_second": 34.904,
      "eval_steps_per_second": 1.093,
      "step": 576
    }
  ],
  "logging_steps": 500,
  "max_steps": 576,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "total_flos": 1414171183680.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.8957226795607325,
    "learning_rate": 0.0004027423045111362,
    "num_train_epochs": 6,
    "temperature": 3
  }
}