xuancoblab2023 committed (verified)
Commit 10f7e8f · Parent(s): 4f0dbba

Training in progress, epoch 11

Files changed (42)
  1. logs/events.out.tfevents.1713593213.1376c752d37a.9760.53 +3 -0
  2. logs/events.out.tfevents.1713593452.1376c752d37a.9760.54 +3 -0
  3. model.safetensors +1 -1
  4. run-0/checkpoint-214/model.safetensors +1 -1
  5. run-0/checkpoint-214/optimizer.pt +1 -1
  6. run-0/checkpoint-214/scheduler.pt +1 -1
  7. run-0/checkpoint-214/trainer_state.json +13 -13
  8. run-0/checkpoint-214/training_args.bin +1 -1
  9. run-0/checkpoint-2996/config.json +34 -0
  10. run-0/checkpoint-2996/model.safetensors +3 -0
  11. run-0/checkpoint-2996/optimizer.pt +3 -0
  12. run-0/checkpoint-2996/rng_state.pth +3 -0
  13. run-0/checkpoint-2996/scheduler.pt +3 -0
  14. run-0/checkpoint-2996/special_tokens_map.json +7 -0
  15. run-0/checkpoint-2996/tokenizer.json +0 -0
  16. run-0/checkpoint-2996/tokenizer_config.json +57 -0
  17. run-0/checkpoint-2996/trainer_state.json +306 -0
  18. run-0/checkpoint-2996/training_args.bin +3 -0
  19. run-0/checkpoint-2996/vocab.txt +0 -0
  20. run-1/checkpoint-2140/config.json +34 -0
  21. run-1/checkpoint-2140/model.safetensors +3 -0
  22. run-1/checkpoint-2140/optimizer.pt +3 -0
  23. run-1/checkpoint-2140/rng_state.pth +3 -0
  24. run-1/checkpoint-2140/scheduler.pt +3 -0
  25. run-1/checkpoint-2140/special_tokens_map.json +7 -0
  26. run-1/checkpoint-2140/tokenizer.json +0 -0
  27. run-1/checkpoint-2140/tokenizer_config.json +57 -0
  28. run-1/checkpoint-2140/trainer_state.json +226 -0
  29. run-1/checkpoint-2140/training_args.bin +3 -0
  30. run-1/checkpoint-2140/vocab.txt +0 -0
  31. run-1/checkpoint-2354/config.json +34 -0
  32. run-1/checkpoint-2354/model.safetensors +3 -0
  33. run-1/checkpoint-2354/optimizer.pt +3 -0
  34. run-1/checkpoint-2354/rng_state.pth +3 -0
  35. run-1/checkpoint-2354/scheduler.pt +3 -0
  36. run-1/checkpoint-2354/special_tokens_map.json +7 -0
  37. run-1/checkpoint-2354/tokenizer.json +0 -0
  38. run-1/checkpoint-2354/tokenizer_config.json +57 -0
  39. run-1/checkpoint-2354/trainer_state.json +246 -0
  40. run-1/checkpoint-2354/training_args.bin +3 -0
  41. run-1/checkpoint-2354/vocab.txt +0 -0
  42. training_args.bin +1 -1
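The commit adds hyperparameter-search checkpoints (run-0, run-1) for a distilled tiny BERT SST-2 classifier. As a minimal sketch of how one of these checkpoint directories is typically consumed (assuming the repo is checked out locally with Git LFS objects pulled; the checkpoint path and example sentence are illustrative):

# Sketch: load one of the checkpoints added in this commit and run a single
# SST-2 style prediction. Paths and the example sentence are illustrative.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

ckpt_dir = "run-1/checkpoint-2354"  # any checkpoint directory from this commit
tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)
model = AutoModelForSequenceClassification.from_pretrained(ckpt_dir)
model.eval()

inputs = tokenizer("a charming and often affecting journey", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # "negative" or "positive" per the checkpoint's config.json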
logs/events.out.tfevents.1713593213.1376c752d37a.9760.53 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e9d3d6e640e32578cdda671e0d9e24e50fb34c17e4467b100f3b1d287c108c1
+ size 15328
logs/events.out.tfevents.1713593452.1376c752d37a.9760.54 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c29bb2f106d8d7d746635fe5aed243dec0ba91996f45ad0adca8c65610500cc8
+ size 13136
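These log files (and the model, optimizer, and scheduler files below) are stored as Git LFS pointers: the repository itself only tracks a version line, a sha256 oid, and a byte size. A small sketch of how such a pointer could be checked against a downloaded object (hypothetical helper, not part of this repo):

# Sketch: verify a downloaded LFS object against its pointer file (illustrative only).
import hashlib

def read_pointer(pointer_path):
    """Parse 'version', 'oid sha256:<hex>' and 'size' from a Git LFS pointer file."""
    fields = dict(line.strip().split(" ", 1) for line in open(pointer_path) if line.strip())
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def matches(pointer_path, object_path):
    expected_oid, expected_size = read_pointer(pointer_path)
    data = open(object_path, "rb").read()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# e.g. matches("model.safetensors.pointer", "model.safetensors")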
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6c399262b4bc15f678ef2b728246acd92feedf44bb90dd9e1ef4d604de9ef865
+ oid sha256:dbec1f24b0dd6424b800ee43d82ad08f76324c77508985422e71a486f73e8ae5
  size 17549312
run-0/checkpoint-214/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d42def1b572010b544bb54d52b3910c1f10bf8ffb3c8f4a2c972b149278d0bbb
+ oid sha256:516adb2bc811a1a0e97e9b5f0a9a920601cd2fa195fec8c8e40447d5c6720f23
  size 17549312
run-0/checkpoint-214/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5f665e118607b5ec68f214da2f3af2546c52aabebc9c78c639c9aeff58a701e0
+ oid sha256:cfb00241f34d7682f0ce30a5d1baba6baec6c4c2bb17db518950d668a0d43db6
  size 35123898
run-0/checkpoint-214/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aee53237cef75e0520099d381b5061db996c5b968d44a3f9e5bb8dd5d455c932
+ oid sha256:4cc2ee9f11db6435a9399184a28e4949defdb7cd018d93c93beb40c85b67545d
  size 1064
run-0/checkpoint-214/trainer_state.json CHANGED
@@ -10,37 +10,37 @@
  "log_history": [
  {
  "epoch": 1.0,
- "grad_norm": 2.621731758117676,
- "learning_rate": 8.86126667135369e-06,
- "loss": 0.5024,
+ "grad_norm": 0.3824027478694916,
+ "learning_rate": 5.531898354978819e-05,
+ "loss": 0.0835,
  "step": 214
  },
  {
  "epoch": 1.0,
  "eval_accuracy": 0.6666666666666666,
  "eval_f1": 0.0,
- "eval_loss": 0.4761257767677307,
+ "eval_loss": 0.06421080976724625,
  "eval_mcc": 0.0,
  "eval_precision": 0.0,
  "eval_recall": 0.0,
- "eval_runtime": 3.027,
- "eval_samples_per_second": 563.925,
- "eval_steps_per_second": 17.839,
+ "eval_runtime": 3.1927,
+ "eval_samples_per_second": 534.663,
+ "eval_steps_per_second": 16.914,
  "step": 214
  }
  ],
  "logging_steps": 500,
- "max_steps": 856,
+ "max_steps": 2996,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 4,
+ "num_train_epochs": 14,
  "save_steps": 500,
  "total_flos": 524775664440.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
- "alpha": 0.7337380456487411,
- "learning_rate": 1.1815022228471587e-05,
- "num_train_epochs": 4,
- "temperature": 14
+ "alpha": 0.08651897585698409,
+ "learning_rate": 5.9574289976694975e-05,
+ "num_train_epochs": 14,
+ "temperature": 43
  }
  }
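The trainer_state.json diff above reflects a new hyperparameter-search trial: the trial_params block switches to alpha ≈ 0.087, learning_rate ≈ 5.96e-05, 14 epochs and temperature 43. A quick way to inspect these fields from any checkpoint (a sketch; the path is illustrative):

# Sketch: print the trial hyperparameters and the last logged eval metrics
# from a Trainer checkpoint's trainer_state.json (path is illustrative).
import json

with open("run-0/checkpoint-214/trainer_state.json") as f:
    state = json.load(f)

print("trial params:", state["trial_params"])
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]
print("last eval:", eval_logs[-1])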
run-0/checkpoint-214/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b1ef4ebce0580268fde2cec4cf96d669c8d482ee4f83099bbcdf4ab3eb2af99d
+ oid sha256:43fb637dc58b3d23529e81f9c2590c1fd459503a7bb87369b51fad6346ae41da
  size 5048
run-0/checkpoint-2996/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "_name_or_path": "google/bert_uncased_L-2_H-128_A-2",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 128,
+ "id2label": {
+ "0": "negative",
+ "1": "positive"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 512,
+ "label2id": {
+ "negative": "0",
+ "positive": "1"
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 2,
+ "num_hidden_layers": 2,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.0",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
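The added config.json describes the 2-layer, 128-hidden BERT student (google/bert_uncased_L-2_H-128_A-2) with a two-class head. A short sketch of rebuilding the architecture from this config and checking its size (paths illustrative; the parameter count is approximate):

# Sketch: instantiate the tiny BERT classifier described by the config above
# and report its parameter count (checkpoint path is illustrative).
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("run-0/checkpoint-2996")
model = AutoModelForSequenceClassification.from_config(config)  # random init, no weights loaded
print(config.num_hidden_layers, config.hidden_size, config.num_attention_heads)
print(f"{model.num_parameters():,} parameters")  # roughly 4.4M for this tiny BERT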
run-0/checkpoint-2996/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:627b2a2f4dd817404904b0cdda0393b4dec358c0b8917136b69f256eb64145af
+ size 17549312
run-0/checkpoint-2996/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b2973bf2e8f0138441be63baed958fed94e3ef55ad1f39bdf0aaeaafeb8d6a6
+ size 35123898
run-0/checkpoint-2996/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9426185b39d441044107ee0bb63490b78521dba8c90fe388accfdd0dbcbf9fec
+ size 14308
run-0/checkpoint-2996/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67c2c65264150b0914a79691226988115a8aea931aade13a5fe5e31ff8109390
+ size 1064
run-0/checkpoint-2996/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
run-0/checkpoint-2996/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-0/checkpoint-2996/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
run-0/checkpoint-2996/trainer_state.json ADDED
@@ -0,0 +1,306 @@
+ {
+ "best_metric": 0.6666666666666666,
+ "best_model_checkpoint": "tiny-bert-sst2-distilled/run-0/checkpoint-214",
+ "epoch": 14.0,
+ "eval_steps": 500,
+ "global_step": 2996,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3824027478694916,
+ "learning_rate": 5.531898354978819e-05,
+ "loss": 0.0835,
+ "step": 214
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.0,
+ "eval_loss": 0.06421080976724625,
+ "eval_mcc": 0.0,
+ "eval_precision": 0.0,
+ "eval_recall": 0.0,
+ "eval_runtime": 3.1927,
+ "eval_samples_per_second": 534.663,
+ "eval_steps_per_second": 16.914,
+ "step": 214
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.4931301176548004,
+ "learning_rate": 5.10636771228814e-05,
+ "loss": 0.066,
+ "step": 428
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.006980802792321117,
+ "eval_loss": 0.06297493726015091,
+ "eval_mcc": 0.01713474628469157,
+ "eval_precision": 0.5,
+ "eval_recall": 0.0035149384885764497,
+ "eval_runtime": 3.3168,
+ "eval_samples_per_second": 514.655,
+ "eval_steps_per_second": 16.281,
+ "step": 428
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.38297247886657715,
+ "learning_rate": 4.6808370695974625e-05,
+ "loss": 0.0648,
+ "step": 642
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.013864818024263433,
+ "eval_loss": 0.06352359801530838,
+ "eval_mcc": 0.024260699053001704,
+ "eval_precision": 0.5,
+ "eval_recall": 0.007029876977152899,
+ "eval_runtime": 3.1502,
+ "eval_samples_per_second": 541.864,
+ "eval_steps_per_second": 17.142,
+ "step": 642
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.4884311258792877,
+ "learning_rate": 4.255306426906784e-05,
+ "loss": 0.0642,
+ "step": 856
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.017271157167530225,
+ "eval_loss": 0.06221030279994011,
+ "eval_mcc": 0.027140265094376777,
+ "eval_precision": 0.5,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.7762,
+ "eval_samples_per_second": 452.044,
+ "eval_steps_per_second": 14.3,
+ "step": 856
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 0.7101069688796997,
+ "learning_rate": 3.829775784216106e-05,
+ "loss": 0.064,
+ "step": 1070
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.017271157167530225,
+ "eval_loss": 0.06226570904254913,
+ "eval_mcc": 0.027140265094376777,
+ "eval_precision": 0.5,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.1334,
+ "eval_samples_per_second": 544.769,
+ "eval_steps_per_second": 17.233,
+ "step": 1070
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 0.42137953639030457,
+ "learning_rate": 3.404245141525427e-05,
+ "loss": 0.0637,
+ "step": 1284
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.6649091974223784,
+ "eval_f1": 0.02389078498293515,
+ "eval_loss": 0.06161979213356972,
+ "eval_mcc": 0.016686958293742785,
+ "eval_precision": 0.4117647058823529,
+ "eval_recall": 0.012302284710017574,
+ "eval_runtime": 3.2462,
+ "eval_samples_per_second": 525.852,
+ "eval_steps_per_second": 16.635,
+ "step": 1284
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 0.4732053279876709,
+ "learning_rate": 2.9787144988347488e-05,
+ "loss": 0.0634,
+ "step": 1498
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.664323374340949,
+ "eval_f1": 0.020512820512820513,
+ "eval_loss": 0.061539050191640854,
+ "eval_mcc": 0.008597718124511362,
+ "eval_precision": 0.375,
+ "eval_recall": 0.01054481546572935,
+ "eval_runtime": 3.1722,
+ "eval_samples_per_second": 538.12,
+ "eval_steps_per_second": 17.023,
+ "step": 1498
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 0.4758211374282837,
+ "learning_rate": 2.55318385614407e-05,
+ "loss": 0.0631,
+ "step": 1712
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.6654950205038078,
+ "eval_f1": 0.017211703958691912,
+ "eval_loss": 0.061484288424253464,
+ "eval_mcc": 0.01487410293271824,
+ "eval_precision": 0.4166666666666667,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.1952,
+ "eval_samples_per_second": 534.244,
+ "eval_steps_per_second": 16.901,
+ "step": 1712
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 0.3838660418987274,
+ "learning_rate": 2.127653213453392e-05,
+ "loss": 0.0632,
+ "step": 1926
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.6660808435852372,
+ "eval_f1": 0.01724137931034483,
+ "eval_loss": 0.06174994260072708,
+ "eval_mcc": 0.020707884164064556,
+ "eval_precision": 0.45454545454545453,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.1409,
+ "eval_samples_per_second": 543.466,
+ "eval_steps_per_second": 17.192,
+ "step": 1926
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 0.3443503677845001,
+ "learning_rate": 1.7021225707627134e-05,
+ "loss": 0.0629,
+ "step": 2140
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.664323374340949,
+ "eval_f1": 0.017152658662092625,
+ "eval_loss": 0.061242878437042236,
+ "eval_mcc": 0.004592958330124466,
+ "eval_precision": 0.35714285714285715,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.1964,
+ "eval_samples_per_second": 534.034,
+ "eval_steps_per_second": 16.894,
+ "step": 2140
+ },
+ {
+ "epoch": 11.0,
+ "grad_norm": 0.31307530403137207,
+ "learning_rate": 1.276591928072035e-05,
+ "loss": 0.0628,
+ "step": 2354
+ },
+ {
+ "epoch": 11.0,
+ "eval_accuracy": 0.6654950205038078,
+ "eval_f1": 0.017211703958691912,
+ "eval_loss": 0.061483997851610184,
+ "eval_mcc": 0.01487410293271824,
+ "eval_precision": 0.4166666666666667,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.1572,
+ "eval_samples_per_second": 540.674,
+ "eval_steps_per_second": 17.104,
+ "step": 2354
+ },
+ {
+ "epoch": 12.0,
+ "grad_norm": 0.26839011907577515,
+ "learning_rate": 8.510612853813567e-06,
+ "loss": 0.0628,
+ "step": 2568
+ },
+ {
+ "epoch": 12.0,
+ "eval_accuracy": 0.6654950205038078,
+ "eval_f1": 0.017211703958691912,
+ "eval_loss": 0.06137599050998688,
+ "eval_mcc": 0.01487410293271824,
+ "eval_precision": 0.4166666666666667,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.1832,
+ "eval_samples_per_second": 536.247,
+ "eval_steps_per_second": 16.964,
+ "step": 2568
+ },
+ {
+ "epoch": 13.0,
+ "grad_norm": 0.8179745674133301,
+ "learning_rate": 4.2553064269067835e-06,
+ "loss": 0.0626,
+ "step": 2782
+ },
+ {
+ "epoch": 13.0,
+ "eval_accuracy": 0.6654950205038078,
+ "eval_f1": 0.017211703958691912,
+ "eval_loss": 0.06123984605073929,
+ "eval_mcc": 0.01487410293271824,
+ "eval_precision": 0.4166666666666667,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.1468,
+ "eval_samples_per_second": 542.461,
+ "eval_steps_per_second": 17.16,
+ "step": 2782
+ },
+ {
+ "epoch": 14.0,
+ "grad_norm": 0.34526437520980835,
+ "learning_rate": 0.0,
+ "loss": 0.0624,
+ "step": 2996
+ },
+ {
+ "epoch": 14.0,
+ "eval_accuracy": 0.6654950205038078,
+ "eval_f1": 0.017211703958691912,
+ "eval_loss": 0.061105918139219284,
+ "eval_mcc": 0.01487410293271824,
+ "eval_precision": 0.4166666666666667,
+ "eval_recall": 0.008787346221441126,
+ "eval_runtime": 3.9414,
+ "eval_samples_per_second": 433.098,
+ "eval_steps_per_second": 13.701,
+ "step": 2996
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 2996,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 14,
+ "save_steps": 500,
+ "total_flos": 7346859302160.0,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": {
+ "alpha": 0.08651897585698409,
+ "learning_rate": 5.9574289976694975e-05,
+ "num_train_epochs": 14,
+ "temperature": 43
+ }
+ }
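Run-0's eval_accuracy never moves past the 0.6667 level that corresponds to always predicting the majority class (eval_f1 and eval_recall stay near zero), so it can be useful to scan log_history for the strongest epoch by another metric. A minimal sketch (path illustrative):

# Sketch: pick the best epoch in run-0/checkpoint-2996 by MCC instead of accuracy
# (trainer_state.json path is illustrative).
import json

state = json.load(open("run-0/checkpoint-2996/trainer_state.json"))
evals = [e for e in state["log_history"] if "eval_mcc" in e]
best = max(evals, key=lambda e: e["eval_mcc"])
print(f"epoch {best['epoch']}: mcc={best['eval_mcc']:.4f}, f1={best['eval_f1']:.4f}")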
run-0/checkpoint-2996/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43fb637dc58b3d23529e81f9c2590c1fd459503a7bb87369b51fad6346ae41da
+ size 5048
run-0/checkpoint-2996/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-1/checkpoint-2140/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "_name_or_path": "google/bert_uncased_L-2_H-128_A-2",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 128,
+ "id2label": {
+ "0": "negative",
+ "1": "positive"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 512,
+ "label2id": {
+ "negative": "0",
+ "positive": "1"
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 2,
+ "num_hidden_layers": 2,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.0",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
run-1/checkpoint-2140/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0ee9015035ca4fc6fda13391c8e272110100694e19aa70c459bf4bab0f1329f
+ size 17549312
run-1/checkpoint-2140/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c1c6392eb8b5b0973b021b886b3b4d774649dc2632b2d576c8bd9dcfb39976d
+ size 35123898
run-1/checkpoint-2140/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddb16c48b271d0534b9a61cb30864d6f8bb1c10a35f7fb5f704c529b2ed28df1
+ size 14308
run-1/checkpoint-2140/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:779fcd67b94d2c110080f05d50aad0d23652ffb63d562fa79c77de34dad1bdde
+ size 1064
run-1/checkpoint-2140/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
run-1/checkpoint-2140/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-1/checkpoint-2140/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
run-1/checkpoint-2140/trainer_state.json ADDED
@@ -0,0 +1,226 @@
+ {
+ "best_metric": 0.700058582308143,
+ "best_model_checkpoint": "tiny-bert-sst2-distilled/run-1/checkpoint-2140",
+ "epoch": 10.0,
+ "eval_steps": 500,
+ "global_step": 2140,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 2.750048875808716,
+ "learning_rate": 0.0008904595556519428,
+ "loss": 0.3982,
+ "step": 214
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.6936145284124194,
+ "eval_f1": 0.2496413199426112,
+ "eval_loss": 0.3941108286380768,
+ "eval_mcc": 0.2091898426756361,
+ "eval_precision": 0.6796875,
+ "eval_recall": 0.15289982425307558,
+ "eval_runtime": 3.1574,
+ "eval_samples_per_second": 540.639,
+ "eval_steps_per_second": 17.103,
+ "step": 214
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 1.170525074005127,
+ "learning_rate": 0.0008014136000867486,
+ "loss": 0.3881,
+ "step": 428
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.0,
+ "eval_loss": 0.3913939595222473,
+ "eval_mcc": 0.0,
+ "eval_precision": 0.0,
+ "eval_recall": 0.0,
+ "eval_runtime": 3.2957,
+ "eval_samples_per_second": 517.943,
+ "eval_steps_per_second": 16.385,
+ "step": 428
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.8615076541900635,
+ "learning_rate": 0.0007123676445215543,
+ "loss": 0.3832,
+ "step": 642
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.69302870533099,
+ "eval_f1": 0.2681564245810056,
+ "eval_loss": 0.3964165151119232,
+ "eval_mcc": 0.20820116163188562,
+ "eval_precision": 0.6530612244897959,
+ "eval_recall": 0.1687170474516696,
+ "eval_runtime": 3.1559,
+ "eval_samples_per_second": 540.891,
+ "eval_steps_per_second": 17.111,
+ "step": 642
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 1.2408286333084106,
+ "learning_rate": 0.00062332168895636,
+ "loss": 0.3935,
+ "step": 856
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.6924428822495606,
+ "eval_f1": 0.25742574257425743,
+ "eval_loss": 0.39223405718803406,
+ "eval_mcc": 0.20514822121510018,
+ "eval_precision": 0.6594202898550725,
+ "eval_recall": 0.15992970123022848,
+ "eval_runtime": 3.8392,
+ "eval_samples_per_second": 444.629,
+ "eval_steps_per_second": 14.066,
+ "step": 856
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 3.090942144393921,
+ "learning_rate": 0.0005342757333911657,
+ "loss": 0.3871,
+ "step": 1070
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.0,
+ "eval_loss": 0.389874130487442,
+ "eval_mcc": 0.0,
+ "eval_precision": 0.0,
+ "eval_recall": 0.0,
+ "eval_runtime": 3.1327,
+ "eval_samples_per_second": 544.899,
+ "eval_steps_per_second": 17.238,
+ "step": 1070
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 2.2668867111206055,
+ "learning_rate": 0.0004452297778259714,
+ "loss": 0.3881,
+ "step": 1284
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.6977152899824253,
+ "eval_f1": 0.26074498567335247,
+ "eval_loss": 0.3898678421974182,
+ "eval_mcc": 0.22568315247838705,
+ "eval_precision": 0.7054263565891473,
+ "eval_recall": 0.15992970123022848,
+ "eval_runtime": 3.2109,
+ "eval_samples_per_second": 531.633,
+ "eval_steps_per_second": 16.818,
+ "step": 1284
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 0.9589310884475708,
+ "learning_rate": 0.00035618382226077714,
+ "loss": 0.3856,
+ "step": 1498
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.690099589923843,
+ "eval_f1": 0.18238021638330756,
+ "eval_loss": 0.3910105228424072,
+ "eval_mcc": 0.19638682696493384,
+ "eval_precision": 0.7564102564102564,
+ "eval_recall": 0.10369068541300527,
+ "eval_runtime": 3.168,
+ "eval_samples_per_second": 538.82,
+ "eval_steps_per_second": 17.045,
+ "step": 1498
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 1.0469610691070557,
+ "learning_rate": 0.00026713786669558284,
+ "loss": 0.3795,
+ "step": 1712
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.6965436438195665,
+ "eval_f1": 0.22686567164179106,
+ "eval_loss": 0.3840750753879547,
+ "eval_mcc": 0.22297453441408152,
+ "eval_precision": 0.7524752475247525,
+ "eval_recall": 0.1335676625659051,
+ "eval_runtime": 3.305,
+ "eval_samples_per_second": 516.497,
+ "eval_steps_per_second": 16.339,
+ "step": 1712
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 0.8197473883628845,
+ "learning_rate": 0.00017809191113038857,
+ "loss": 0.3762,
+ "step": 1926
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.6936145284124194,
+ "eval_f1": 0.19908116385911181,
+ "eval_loss": 0.3833659291267395,
+ "eval_mcc": 0.21257347787570094,
+ "eval_precision": 0.7738095238095238,
+ "eval_recall": 0.11423550087873462,
+ "eval_runtime": 3.1266,
+ "eval_samples_per_second": 545.952,
+ "eval_steps_per_second": 17.271,
+ "step": 1926
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 2.2055039405822754,
+ "learning_rate": 8.904595556519429e-05,
+ "loss": 0.3738,
+ "step": 2140
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.700058582308143,
+ "eval_f1": 0.30997304582210244,
+ "eval_loss": 0.3850247263908386,
+ "eval_mcc": 0.23609006824017303,
+ "eval_precision": 0.6647398843930635,
+ "eval_recall": 0.20210896309314588,
+ "eval_runtime": 3.1854,
+ "eval_samples_per_second": 535.877,
+ "eval_steps_per_second": 16.952,
+ "step": 2140
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 2354,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 11,
+ "save_steps": 500,
+ "total_flos": 5247756644400.0,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": {
+ "alpha": 0.6246941768140464,
+ "learning_rate": 0.000979505511217137,
+ "num_train_epochs": 11,
+ "temperature": 32
+ }
+ }
run-1/checkpoint-2140/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4d143f9ad430eb4bdcb42b2bc68f69577e4d0a45752af31555ead47e276d252
+ size 5048
run-1/checkpoint-2140/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-1/checkpoint-2354/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "_name_or_path": "google/bert_uncased_L-2_H-128_A-2",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 128,
+ "id2label": {
+ "0": "negative",
+ "1": "positive"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 512,
+ "label2id": {
+ "negative": "0",
+ "positive": "1"
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 2,
+ "num_hidden_layers": 2,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.0",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
run-1/checkpoint-2354/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbec1f24b0dd6424b800ee43d82ad08f76324c77508985422e71a486f73e8ae5
+ size 17549312
run-1/checkpoint-2354/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c793b4c5556c27883b23f1cf17850ab9aa31144de15998b99f2abfc12531c00
+ size 35123898
run-1/checkpoint-2354/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d86a1cc2be603ae8ad6d0dcc0791ff88331c7f3e5759ecb7af57b72362de36ff
+ size 14308
run-1/checkpoint-2354/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc186f296748e42632cafe1c0a9739d24599f9d23133e33adb55c3e250fdd1ba
+ size 1064
run-1/checkpoint-2354/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
run-1/checkpoint-2354/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-1/checkpoint-2354/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
run-1/checkpoint-2354/trainer_state.json ADDED
@@ -0,0 +1,246 @@
+ {
+ "best_metric": 0.7012302284710018,
+ "best_model_checkpoint": "tiny-bert-sst2-distilled/run-1/checkpoint-2354",
+ "epoch": 11.0,
+ "eval_steps": 500,
+ "global_step": 2354,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 2.750048875808716,
+ "learning_rate": 0.0008904595556519428,
+ "loss": 0.3982,
+ "step": 214
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.6936145284124194,
+ "eval_f1": 0.2496413199426112,
+ "eval_loss": 0.3941108286380768,
+ "eval_mcc": 0.2091898426756361,
+ "eval_precision": 0.6796875,
+ "eval_recall": 0.15289982425307558,
+ "eval_runtime": 3.1574,
+ "eval_samples_per_second": 540.639,
+ "eval_steps_per_second": 17.103,
+ "step": 214
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 1.170525074005127,
+ "learning_rate": 0.0008014136000867486,
+ "loss": 0.3881,
+ "step": 428
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.0,
+ "eval_loss": 0.3913939595222473,
+ "eval_mcc": 0.0,
+ "eval_precision": 0.0,
+ "eval_recall": 0.0,
+ "eval_runtime": 3.2957,
+ "eval_samples_per_second": 517.943,
+ "eval_steps_per_second": 16.385,
+ "step": 428
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.8615076541900635,
+ "learning_rate": 0.0007123676445215543,
+ "loss": 0.3832,
+ "step": 642
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.69302870533099,
+ "eval_f1": 0.2681564245810056,
+ "eval_loss": 0.3964165151119232,
+ "eval_mcc": 0.20820116163188562,
+ "eval_precision": 0.6530612244897959,
+ "eval_recall": 0.1687170474516696,
+ "eval_runtime": 3.1559,
+ "eval_samples_per_second": 540.891,
+ "eval_steps_per_second": 17.111,
+ "step": 642
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 1.2408286333084106,
+ "learning_rate": 0.00062332168895636,
+ "loss": 0.3935,
+ "step": 856
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.6924428822495606,
+ "eval_f1": 0.25742574257425743,
+ "eval_loss": 0.39223405718803406,
+ "eval_mcc": 0.20514822121510018,
+ "eval_precision": 0.6594202898550725,
+ "eval_recall": 0.15992970123022848,
+ "eval_runtime": 3.8392,
+ "eval_samples_per_second": 444.629,
+ "eval_steps_per_second": 14.066,
+ "step": 856
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 3.090942144393921,
+ "learning_rate": 0.0005342757333911657,
+ "loss": 0.3871,
+ "step": 1070
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.6666666666666666,
+ "eval_f1": 0.0,
+ "eval_loss": 0.389874130487442,
+ "eval_mcc": 0.0,
+ "eval_precision": 0.0,
+ "eval_recall": 0.0,
+ "eval_runtime": 3.1327,
+ "eval_samples_per_second": 544.899,
+ "eval_steps_per_second": 17.238,
+ "step": 1070
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 2.2668867111206055,
+ "learning_rate": 0.0004452297778259714,
+ "loss": 0.3881,
+ "step": 1284
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.6977152899824253,
+ "eval_f1": 0.26074498567335247,
+ "eval_loss": 0.3898678421974182,
+ "eval_mcc": 0.22568315247838705,
+ "eval_precision": 0.7054263565891473,
+ "eval_recall": 0.15992970123022848,
+ "eval_runtime": 3.2109,
+ "eval_samples_per_second": 531.633,
+ "eval_steps_per_second": 16.818,
+ "step": 1284
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 0.9589310884475708,
+ "learning_rate": 0.00035618382226077714,
+ "loss": 0.3856,
+ "step": 1498
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.690099589923843,
+ "eval_f1": 0.18238021638330756,
+ "eval_loss": 0.3910105228424072,
+ "eval_mcc": 0.19638682696493384,
+ "eval_precision": 0.7564102564102564,
+ "eval_recall": 0.10369068541300527,
+ "eval_runtime": 3.168,
+ "eval_samples_per_second": 538.82,
+ "eval_steps_per_second": 17.045,
+ "step": 1498
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 1.0469610691070557,
+ "learning_rate": 0.00026713786669558284,
+ "loss": 0.3795,
+ "step": 1712
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.6965436438195665,
+ "eval_f1": 0.22686567164179106,
+ "eval_loss": 0.3840750753879547,
+ "eval_mcc": 0.22297453441408152,
+ "eval_precision": 0.7524752475247525,
+ "eval_recall": 0.1335676625659051,
+ "eval_runtime": 3.305,
+ "eval_samples_per_second": 516.497,
+ "eval_steps_per_second": 16.339,
+ "step": 1712
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 0.8197473883628845,
+ "learning_rate": 0.00017809191113038857,
+ "loss": 0.3762,
+ "step": 1926
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.6936145284124194,
+ "eval_f1": 0.19908116385911181,
+ "eval_loss": 0.3833659291267395,
+ "eval_mcc": 0.21257347787570094,
+ "eval_precision": 0.7738095238095238,
+ "eval_recall": 0.11423550087873462,
+ "eval_runtime": 3.1266,
+ "eval_samples_per_second": 545.952,
+ "eval_steps_per_second": 17.271,
+ "step": 1926
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 2.2055039405822754,
+ "learning_rate": 8.904595556519429e-05,
+ "loss": 0.3738,
+ "step": 2140
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.700058582308143,
+ "eval_f1": 0.30997304582210244,
+ "eval_loss": 0.3850247263908386,
+ "eval_mcc": 0.23609006824017303,
+ "eval_precision": 0.6647398843930635,
+ "eval_recall": 0.20210896309314588,
+ "eval_runtime": 3.1854,
+ "eval_samples_per_second": 535.877,
+ "eval_steps_per_second": 16.952,
+ "step": 2140
+ },
+ {
+ "epoch": 11.0,
+ "grad_norm": 1.5309932231903076,
+ "learning_rate": 0.0,
+ "loss": 0.3714,
+ "step": 2354
+ },
+ {
+ "epoch": 11.0,
+ "eval_accuracy": 0.7012302284710018,
+ "eval_f1": 0.2877094972067039,
+ "eval_loss": 0.38300591707229614,
+ "eval_mcc": 0.23920984527918776,
+ "eval_precision": 0.7006802721088435,
+ "eval_recall": 0.18101933216168717,
+ "eval_runtime": 3.1469,
+ "eval_samples_per_second": 542.44,
+ "eval_steps_per_second": 17.16,
+ "step": 2354
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 2354,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 11,
+ "save_steps": 500,
+ "total_flos": 5772532308840.0,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": {
+ "alpha": 0.6246941768140464,
+ "learning_rate": 0.000979505511217137,
+ "num_train_epochs": 11,
+ "temperature": 32
+ }
+ }
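Across the two trials recorded in this commit, run-1 (alpha ≈ 0.62, learning rate ≈ 9.8e-4, temperature 32) reaches a best eval_accuracy of about 0.7012 at checkpoint-2354, while run-0 stays at the 0.6667 baseline. A small sketch of comparing the runs directly from their trainer states (paths illustrative):

# Sketch: compare best_metric across the hyperparameter-search runs in this commit
# (checkpoint paths are illustrative).
import json

runs = {
    "run-0": "run-0/checkpoint-2996/trainer_state.json",
    "run-1": "run-1/checkpoint-2354/trainer_state.json",
}
for name, path in runs.items():
    state = json.load(open(path))
    print(name, state["best_metric"], state["best_model_checkpoint"])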
run-1/checkpoint-2354/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4d143f9ad430eb4bdcb42b2bc68f69577e4d0a45752af31555ead47e276d252
+ size 5048
run-1/checkpoint-2354/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6a80be2083aab6d55a2636528b4683957867c84dc24de15d19cfb958da8d2b60
+ oid sha256:c4d143f9ad430eb4bdcb42b2bc68f69577e4d0a45752af31555ead47e276d252
  size 5048
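The updated training_args.bin is a TrainingArguments object saved with torch.save, so it can be inspected directly. A sketch (transformers must be importable because the pickle references its TrainingArguments class; on older PyTorch the weights_only argument may not exist and can be dropped):

# Sketch: inspect the updated training_args.bin.
import torch

args = torch.load("training_args.bin", weights_only=False)  # pickled TrainingArguments
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)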