{
  "best_metric": 0.5606653620352251,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-0/checkpoint-384",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 384,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.8228768110275269,
      "learning_rate": 1.922542063523116e-05,
      "loss": 0.7149,
      "step": 96
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5,
      "eval_f1": 0.007766990291262136,
      "eval_loss": 0.7131175398826599,
      "eval_precision": 0.5,
      "eval_recall": 0.003913894324853229,
      "eval_runtime": 26.5316,
      "eval_samples_per_second": 38.52,
      "eval_steps_per_second": 1.206,
      "step": 96
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.1306586265563965,
      "learning_rate": 1.7089262786872146e-05,
      "loss": 0.7062,
      "step": 192
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5136986301369864,
      "eval_f1": 0.08133086876155268,
      "eval_loss": 0.7069234848022461,
      "eval_precision": 0.7333333333333333,
      "eval_recall": 0.043052837573385516,
      "eval_runtime": 26.8847,
      "eval_samples_per_second": 38.014,
      "eval_steps_per_second": 1.19,
      "step": 192
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.5551536083221436,
      "learning_rate": 1.4953104938513126e-05,
      "loss": 0.6972,
      "step": 288
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.512720156555773,
      "eval_f1": 0.08791208791208792,
      "eval_loss": 0.6998902559280396,
      "eval_precision": 0.6857142857142857,
      "eval_recall": 0.046966731898238745,
      "eval_runtime": 26.6683,
      "eval_samples_per_second": 38.323,
      "eval_steps_per_second": 1.2,
      "step": 288
    },
    {
      "epoch": 4.0,
      "grad_norm": 2.080883741378784,
      "learning_rate": 1.2816947090154107e-05,
      "loss": 0.6854,
      "step": 384
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5606653620352251,
      "eval_f1": 0.3145038167938931,
      "eval_loss": 0.6807624697685242,
      "eval_precision": 0.7152777777777778,
      "eval_recall": 0.20156555772994128,
      "eval_runtime": 26.6572,
      "eval_samples_per_second": 38.339,
      "eval_steps_per_second": 1.2,
      "step": 384
    }
  ],
  "logging_steps": 500,
  "max_steps": 960,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 881956222080.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.8789746577248793,
    "learning_rate": 2.136157848359018e-05,
    "num_train_epochs": 10,
    "temperature": 14
  }
}