{
  "best_metric": 0.5,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-1/checkpoint-96",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 96,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.4667714238166809,
      "learning_rate": 7.759069356217338e-06,
      "loss": 0.317,
      "step": 96
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5,
      "eval_f1": 0.0,
      "eval_loss": 0.26975956559181213,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 28.3197,
      "eval_samples_per_second": 36.088,
      "eval_steps_per_second": 1.13,
      "step": 96
    }
  ],
  "logging_steps": 500,
  "max_steps": 384,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 235695197280.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.2995113868248127,
    "learning_rate": 1.0345425808289785e-05,
    "num_train_epochs": 4,
    "temperature": 18
  }
}