alfiinyang committed on
Commit 5194326 · 1 Parent(s): 9a9caf3

Update model and tokenizer

Files changed (35)
  1. logs/events.out.tfevents.1721998301.cce3337b9a02.157.0 +3 -0
  2. logs/events.out.tfevents.1721998341.cce3337b9a02.157.1 +3 -0
  3. logs/events.out.tfevents.1722242538.1e3c4db5059c.468.0 +3 -0
  4. logs/events.out.tfevents.1722255318.6e17b7a5c46d.1118.0 +3 -0
  5. logs/events.out.tfevents.1722265698.47109c206b48.1457.0 +3 -0
  6. logs/events.out.tfevents.1722440330.c8b573fa0d84.1534.0 +3 -0
  7. logs/events.out.tfevents.1724249839.4d4c0a2e9def.239.0 +3 -0
  8. logs/events.out.tfevents.1724250323.4d4c0a2e9def.239.1 +3 -0
  9. results/checkpoint-154/config.json +71 -0
  10. results/checkpoint-154/special_tokens_map.json +7 -0
  11. results/checkpoint-154/tokenizer_config.json +57 -0
  12. results/checkpoint-154/training_args.bin +3 -0
  13. results/checkpoint-154/vocab.txt +0 -0
  14. results/checkpoint-231/config.json +71 -0
  15. results/checkpoint-231/model.safetensors +3 -0
  16. results/checkpoint-231/optimizer.pt +3 -0
  17. results/checkpoint-231/rng_state.pth +3 -0
  18. results/checkpoint-231/scheduler.pt +3 -0
  19. results/checkpoint-231/special_tokens_map.json +7 -0
  20. results/checkpoint-231/tokenizer_config.json +57 -0
  21. results/checkpoint-231/trainer_state.json +194 -0
  22. results/checkpoint-231/training_args.bin +3 -0
  23. results/checkpoint-231/vocab.txt +0 -0
  24. results/checkpoint-77/config.json +71 -0
  25. results/checkpoint-77/model.safetensors +3 -0
  26. results/checkpoint-77/optimizer.pt +3 -0
  27. results/checkpoint-77/rng_state.pth +3 -0
  28. results/checkpoint-77/scheduler.pt +3 -0
  29. results/checkpoint-77/special_tokens_map.json +7 -0
  30. results/checkpoint-77/tokenizer_config.json +57 -0
  31. results/checkpoint-77/trainer_state.json +92 -0
  32. results/checkpoint-77/training_args.bin +3 -0
  33. results/checkpoint-77/vocab.txt +0 -0
  34. tf_checkpoints/config.json +71 -0
  35. tf_checkpoints/model.safetensors +3 -0
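
Every binary artifact in this commit (event logs, `model.safetensors`, `optimizer.pt`, `training_args.bin`, …) is tracked with Git LFS, so the diffs below only show three-line pointer stubs. A minimal sketch for pulling the commit's files locally with `huggingface_hub`; the `repo_id` is a placeholder, not taken from this page:

```python
# Sketch: fetch files from this commit with huggingface_hub.
# REPO_ID is a placeholder; replace it with the actual repository name.
from huggingface_hub import snapshot_download

REPO_ID = "user/text2excel-classifier"  # hypothetical

local_dir = snapshot_download(
    repo_id=REPO_ID,
    revision="5194326",  # commit shown above; the full 40-char hash is safer if available
    allow_patterns=["results/checkpoint-231/*", "tf_checkpoints/*", "logs/*"],
)
print("downloaded to", local_dir)
```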
logs/events.out.tfevents.1721998301.cce3337b9a02.157.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dec12f1e14dcab665e739c9a8b5e7f418d919443f31924ef9bf87c468267690c
+ size 5959
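
Each `events.out.tfevents.*` entry here, like the model and optimizer binaries further down, is committed as a Git LFS pointer: three lines giving the spec version, the SHA-256 of the real blob (`oid`), and its size in bytes. A stdlib-only sketch for parsing such a pointer and checking a downloaded blob against it (file paths are illustrative):

```python
# Sketch: parse a Git LFS pointer and verify a blob against it (paths illustrative).
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Return {'version': ..., 'oid': ..., 'size': ...} from a pointer file."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    fields["oid"] = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    fields["size"] = int(fields["size"])
    return fields

def verify_blob(blob_path: str, pointer: dict) -> bool:
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == pointer["size"] and digest.hexdigest() == pointer["oid"]

ptr = parse_lfs_pointer("logs/events.out.tfevents.1721998301.cce3337b9a02.157.0")
print(verify_blob("events_blob_downloaded_elsewhere", ptr))
```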
logs/events.out.tfevents.1721998341.cce3337b9a02.157.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37c56c6483aeaec3796fa21f527479a9b7ff495c24fcd9859bb4c965e294cf40
+ size 10960
logs/events.out.tfevents.1722242538.1e3c4db5059c.468.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54fad8e73ebcd4ae24b5eac61e933fa35dc60aab708303bb645b25f8e81b212b
+ size 10911
logs/events.out.tfevents.1722255318.6e17b7a5c46d.1118.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:026d0713a48d756068c1a43a64d74cb76e40900d3686a169ca3053a933b388cf
+ size 10911
logs/events.out.tfevents.1722265698.47109c206b48.1457.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9901e2c5f1684bef148969cec93cb3cbca39a8124597da689f1ab88acd18411e
+ size 9291
logs/events.out.tfevents.1722440330.c8b573fa0d84.1534.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08694e8b9a9f2e28e24008e7bf9b0e67a56a20f80e237796fda2955c25393065
+ size 10911
logs/events.out.tfevents.1724249839.4d4c0a2e9def.239.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5a74aed3595453502e87bad53005515e26ae5a75078265ee05aa2d56148f70a
+ size 6580
logs/events.out.tfevents.1724250323.4d4c0a2e9def.239.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc6277e14ca9d24068087cc6be7f9f38e8d92386160927c5867b4f5056c2f595
+ size 88
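
The eight files under `logs/` are TensorBoard event logs from separate runs (named by timestamp, host, and PID). Assuming the `tensorboard` package is installed, a sketch for listing the scalars a run contains; the exact tag names depend on what was logged:

```python
# Sketch: inspect one of the TensorBoard event files under logs/.
# Requires the tensorboard package; scalar tag names vary by run.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("logs/events.out.tfevents.1722440330.c8b573fa0d84.1534.0")
ea.Reload()

for tag in ea.Tags()["scalars"]:
    points = ea.Scalars(tag)
    print(f"{tag}: {len(points)} points, last value {points[-1].value:.4f}")
```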
results/checkpoint-154/config.json ADDED
@@ -0,0 +1,71 @@
+ {
+ "_name_or_path": "bert-base-uncased",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.4",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
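
The config describes `bert-base-uncased` fine-tuned as a 20-way single-label classifier (`problem_type: single_label_classification`), with the label maps still set to the generic `LABEL_0` … `LABEL_19` placeholders. If the real category names are known they can be attached at load time; the names below are purely illustrative, and since this commit ships no `model.safetensors` under `checkpoint-154`, the example points at `checkpoint-231`:

```python
# Sketch: replace the placeholder id2label/label2id when loading a checkpoint.
# The category names are hypothetical, not taken from this repository.
from transformers import AutoModelForSequenceClassification

categories = [f"category_{i}" for i in range(20)]  # substitute the real class names
id2label = dict(enumerate(categories))
label2id = {name: i for i, name in id2label.items()}

model = AutoModelForSequenceClassification.from_pretrained(
    "results/checkpoint-231",
    id2label=id2label,
    label2id=label2id,
)
print(model.config.id2label[0])  # -> "category_0"
```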
results/checkpoint-154/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
results/checkpoint-154/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
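
This is the stock uncased BERT WordPiece tokenizer configuration: lowercasing on, a 512-token limit, and the usual five special tokens at ids 0/100/101/102/103. A sketch for loading it from the checkpoint directory and inspecting how an input is encoded (path and sample text are illustrative):

```python
# Sketch: load the committed tokenizer and inspect an encoded example.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("results/checkpoint-154")

enc = tokenizer("Monthly sales report", truncation=True, max_length=512)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# WordPiece tokens, lowercased and bracketed by [CLS] ... [SEP]
```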
results/checkpoint-154/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98f01ff5ce227ceb86297dcffc4b183939337de0f07d7c52948aca1b0412f659
+ size 5176
results/checkpoint-154/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
results/checkpoint-231/config.json ADDED
@@ -0,0 +1,71 @@
+ {
+ "_name_or_path": "bert-base-uncased",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.4",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
results/checkpoint-231/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:242e08e641133670e3f3f77271aab38bd00c2cf6eba322a96664d7a5182a276a
+ size 438014016
results/checkpoint-231/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:655c478d59a9838f8c5e2bb07fbae8a6a9c7f8f8a74c8f183e11d1c63f04db30
+ size 876143482
results/checkpoint-231/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bec10ad3b595f619b606a8f0dc5706764d6ac93461fd7df1cda7f924dfdef430
+ size 13990
results/checkpoint-231/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6dc5d70ea6d95614d83cf3921988645c741697057908e15f26601c404024d6b
+ size 1064
results/checkpoint-231/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
results/checkpoint-231/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
results/checkpoint-231/trainer_state.json ADDED
@@ -0,0 +1,194 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 231,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.12987012987012986,
+ "grad_norm": 8.817961692810059,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 2.8591,
+ "step": 10
+ },
+ {
+ "epoch": 0.2597402597402597,
+ "grad_norm": 11.00772762298584,
+ "learning_rate": 2.0000000000000003e-06,
+ "loss": 2.8562,
+ "step": 20
+ },
+ {
+ "epoch": 0.38961038961038963,
+ "grad_norm": 10.66635799407959,
+ "learning_rate": 3e-06,
+ "loss": 2.867,
+ "step": 30
+ },
+ {
+ "epoch": 0.5194805194805194,
+ "grad_norm": 8.26025104522705,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 2.7538,
+ "step": 40
+ },
+ {
+ "epoch": 0.6493506493506493,
+ "grad_norm": 10.763989448547363,
+ "learning_rate": 5e-06,
+ "loss": 2.6737,
+ "step": 50
+ },
+ {
+ "epoch": 0.7792207792207793,
+ "grad_norm": 10.080073356628418,
+ "learning_rate": 6e-06,
+ "loss": 2.6648,
+ "step": 60
+ },
+ {
+ "epoch": 0.9090909090909091,
+ "grad_norm": 8.357165336608887,
+ "learning_rate": 7.000000000000001e-06,
+ "loss": 2.5777,
+ "step": 70
+ },
+ {
+ "epoch": 1.0389610389610389,
+ "grad_norm": 10.30080509185791,
+ "learning_rate": 8.000000000000001e-06,
+ "loss": 2.3947,
+ "step": 80
+ },
+ {
+ "epoch": 1.1688311688311688,
+ "grad_norm": 17.036989212036133,
+ "learning_rate": 9e-06,
+ "loss": 2.2674,
+ "step": 90
+ },
+ {
+ "epoch": 1.2987012987012987,
+ "grad_norm": 9.885930061340332,
+ "learning_rate": 1e-05,
+ "loss": 2.0075,
+ "step": 100
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 10.072978973388672,
+ "learning_rate": 1.1000000000000001e-05,
+ "loss": 1.9707,
+ "step": 110
+ },
+ {
+ "epoch": 1.5584415584415585,
+ "grad_norm": 10.257423400878906,
+ "learning_rate": 1.2e-05,
+ "loss": 1.67,
+ "step": 120
+ },
+ {
+ "epoch": 1.6883116883116882,
+ "grad_norm": 7.1921820640563965,
+ "learning_rate": 1.3000000000000001e-05,
+ "loss": 1.5383,
+ "step": 130
+ },
+ {
+ "epoch": 1.8181818181818183,
+ "grad_norm": 8.42944049835205,
+ "learning_rate": 1.4000000000000001e-05,
+ "loss": 1.5019,
+ "step": 140
+ },
+ {
+ "epoch": 1.948051948051948,
+ "grad_norm": 8.962895393371582,
+ "learning_rate": 1.5e-05,
+ "loss": 1.0832,
+ "step": 150
+ },
+ {
+ "epoch": 2.0779220779220777,
+ "grad_norm": 12.685582160949707,
+ "learning_rate": 1.6000000000000003e-05,
+ "loss": 1.0589,
+ "step": 160
+ },
+ {
+ "epoch": 2.207792207792208,
+ "grad_norm": 7.99977445602417,
+ "learning_rate": 1.7000000000000003e-05,
+ "loss": 1.0717,
+ "step": 170
+ },
+ {
+ "epoch": 2.3376623376623376,
+ "grad_norm": 4.187084197998047,
+ "learning_rate": 1.8e-05,
+ "loss": 1.0521,
+ "step": 180
+ },
+ {
+ "epoch": 2.4675324675324677,
+ "grad_norm": 6.426929950714111,
+ "learning_rate": 1.9e-05,
+ "loss": 0.9361,
+ "step": 190
+ },
+ {
+ "epoch": 2.5974025974025974,
+ "grad_norm": 4.999221324920654,
+ "learning_rate": 2e-05,
+ "loss": 0.7682,
+ "step": 200
+ },
+ {
+ "epoch": 2.7272727272727275,
+ "grad_norm": 3.239588975906372,
+ "learning_rate": 2.1e-05,
+ "loss": 0.4877,
+ "step": 210
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 16.341793060302734,
+ "learning_rate": 2.2000000000000003e-05,
+ "loss": 0.603,
+ "step": 220
+ },
+ {
+ "epoch": 2.987012987012987,
+ "grad_norm": 4.9825520515441895,
+ "learning_rate": 2.3000000000000003e-05,
+ "loss": 0.7032,
+ "step": 230
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 231,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 42464352759840.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
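
`trainer_state.json` is plain JSON, so the loss curve above (roughly 2.86 at step 10 down to about 0.70 at step 230, over 231 steps / 3 epochs with a warming-up learning rate) can be extracted with the standard library alone:

```python
# Sketch: print the training-loss curve recorded in a saved trainer_state.json.
import json

with open("results/checkpoint-231/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # training entries; evaluation entries carry eval_* keys instead
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:5.2f}  "
              f"lr {entry['learning_rate']:.1e}  loss {entry['loss']:.4f}")
```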
results/checkpoint-231/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5e4c37110bf7f623eb276fdd089d0d230ba93456284e868edf869ade524c60e
+ size 5176
results/checkpoint-231/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
results/checkpoint-77/config.json ADDED
@@ -0,0 +1,71 @@
+ {
+ "_name_or_path": "bert-base-uncased",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.4",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
results/checkpoint-77/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0f91e5e27cfcddb763330f07153dfe8e6b2b29048246a723d48b1f83372a0ae
+ size 438014016
results/checkpoint-77/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8779a247fb1f3be014745445cca82461dc457411d8c0e46d8401fcf1445f292b
+ size 876143482
results/checkpoint-77/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:991e09878f55fa62f5b1e691ca0b7da7c6f66c1f1c1e27b8dba8799c75bbd3a5
+ size 13990
results/checkpoint-77/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99e104b2e854a3f8f053156570c80d8f729d2ea1b42d3d5e084801ce63d543e9
+ size 1064
results/checkpoint-77/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
results/checkpoint-77/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
results/checkpoint-77/trainer_state.json ADDED
@@ -0,0 +1,92 @@
+ {
+ "best_metric": 2.8195712566375732,
+ "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/text2excel/results/checkpoint-77",
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 77,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.12987012987012986,
+ "grad_norm": 10.064971923828125,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 3.0792,
+ "step": 10
+ },
+ {
+ "epoch": 0.2597402597402597,
+ "grad_norm": 11.230113983154297,
+ "learning_rate": 2.0000000000000003e-06,
+ "loss": 3.0515,
+ "step": 20
+ },
+ {
+ "epoch": 0.38961038961038963,
+ "grad_norm": 12.046746253967285,
+ "learning_rate": 3e-06,
+ "loss": 3.015,
+ "step": 30
+ },
+ {
+ "epoch": 0.5194805194805194,
+ "grad_norm": 9.873001098632812,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 2.8636,
+ "step": 40
+ },
+ {
+ "epoch": 0.6493506493506493,
+ "grad_norm": 13.206339836120605,
+ "learning_rate": 5e-06,
+ "loss": 2.7343,
+ "step": 50
+ },
+ {
+ "epoch": 0.7792207792207793,
+ "grad_norm": 10.480327606201172,
+ "learning_rate": 6e-06,
+ "loss": 2.7141,
+ "step": 60
+ },
+ {
+ "epoch": 0.9090909090909091,
+ "grad_norm": 8.450092315673828,
+ "learning_rate": 7.000000000000001e-06,
+ "loss": 2.58,
+ "step": 70
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.27450980392156865,
+ "eval_f1": 0.2746872504657971,
+ "eval_loss": 2.8195712566375732,
+ "eval_runtime": 24.9461,
+ "eval_samples_per_second": 6.133,
+ "eval_steps_per_second": 0.802,
+ "step": 77
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 231,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 14154784253280.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
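
Unlike the final checkpoint, this epoch-1 state carries evaluation results (`eval_accuracy` ≈ 0.275, `eval_f1` ≈ 0.275, `eval_loss` ≈ 2.82) and records `checkpoint-77` as the current best model. The metric code itself is not part of this commit; purely as an assumption, a conventional `compute_metrics` callback that would produce exactly these `eval_accuracy` / `eval_f1` keys looks like this (the averaging mode is a guess):

```python
# Sketch (assumption): a compute_metrics callback yielding eval_accuracy / eval_f1.
# This is a common pattern, not code recovered from this repository.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "accuracy": accuracy_score(labels, preds),          # logged as eval_accuracy
        "f1": f1_score(labels, preds, average="weighted"),  # logged as eval_f1; averaging is a guess
    }
```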
results/checkpoint-77/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98f01ff5ce227ceb86297dcffc4b183939337de0f07d7c52948aca1b0412f659
+ size 5176
results/checkpoint-77/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
tf_checkpoints/config.json ADDED
@@ -0,0 +1,71 @@
+ {
+ "_name_or_path": "/content/drive/MyDrive/Colab Notebooks/text2excel/mod_iter/t2e_cat_class_model0.1",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.4",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
tf_checkpoints/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77675ef8a83caf48e9a5d740375b7916a6326cef07b893d9ccab059386ec71fb
+ size 438014016
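
`tf_checkpoints/` carries the exported model (config plus weights) but no tokenizer files, so the tokenizer has to come from one of the `results/checkpoint-*` directories or from `bert-base-uncased`. A hedged end-to-end inference sketch over the committed paths; the sample text is illustrative:

```python
# Sketch: classify one input with the exported model (sample text illustrative).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("results/checkpoint-231")  # or "bert-base-uncased"
model = AutoModelForSequenceClassification.from_pretrained("tf_checkpoints")
model.eval()

inputs = tokenizer("Quarterly budget summary", return_tensors="pt", truncation=True)
with torch.no_grad():
    probs = torch.softmax(model(**inputs).logits, dim=-1)[0]

pred = int(probs.argmax())
print(pred, model.config.id2label[pred], round(float(probs[pred]), 4))
```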