{
  "best_metric": 0.594059405940594,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-4/checkpoint-594",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 594,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 1.202705979347229,
      "learning_rate": 6.504485936113838e-05,
      "loss": 0.5495,
      "step": 297
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5069306930693069,
      "eval_f1": 0.10108303249097472,
      "eval_loss": 0.5468167662620544,
      "eval_mcc": 0.027838098756040194,
      "eval_precision": 0.56,
      "eval_recall": 0.05555555555555555,
      "eval_runtime": 0.9316,
      "eval_samples_per_second": 542.063,
      "eval_steps_per_second": 17.174,
      "step": 297
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.8953370451927185,
      "learning_rate": 5.4204049467615325e-05,
      "loss": 0.54,
      "step": 594
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.594059405940594,
      "eval_f1": 0.5858585858585857,
      "eval_loss": 0.5394836068153381,
      "eval_mcc": 0.18817791261380143,
      "eval_precision": 0.5967078189300411,
      "eval_recall": 0.5753968253968254,
      "eval_runtime": 0.9335,
      "eval_samples_per_second": 540.967,
      "eval_steps_per_second": 17.14,
      "step": 594
    }
  ],
  "logging_steps": 500,
  "max_steps": 2079,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 1461402531960.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.7404813991868276,
    "learning_rate": 7.588566925466145e-05,
    "num_train_epochs": 7,
    "temperature": 47
  }
}