jumelet committed
Commit 4b6c2fb (1 parent: 1822f7b)

Training in progress, step 1

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "distilgpt2",
+   "_num_labels": 1,
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 16,
+   "n_head": 2,
+   "n_inner": null,
+   "n_layer": 2,
+   "n_positions": 30,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "use_cache": true,
+   "vocab_size": 70
+ }
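The config describes a deliberately tiny GPT-2 variant (2 layers, 2 heads, 16-dim embeddings, a 30-position context, and a 70-word vocabulary) derived from the distilgpt2 architecture. A minimal sketch of instantiating a model from this file, assuming transformers 4.30.x and that config.json sits in the working directory:

from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config.from_json_file("config.json")
model = GPT2LMHeadModel(config)  # fresh random weights with the checkpoint's shape

# With tied input/output embeddings this comes to roughly 8k parameters,
# consistent with the ~41 KB pytorch_model.bin below.
print(sum(p.numel() for p in model.parameters()))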
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83f557fea27b9647d605e046df3300af1f6f5c072fdc9444abf6490a8129e11d
+ size 41730
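The weights themselves live in Git LFS; the repository stores only this pointer (spec version, SHA-256 of the blob, and its size in bytes). A short sketch of verifying a downloaded blob against the pointer fields above:

import hashlib

with open("pytorch_model.bin", "rb") as f:  # the resolved blob, not the pointer
    blob = f.read()

assert len(blob) == 41730                        # matches the "size" field
assert hashlib.sha256(blob).hexdigest() == (
    "83f557fea27b9647d605e046df3300af1f6f5c072fdc9444abf6490a8129e11d"
)                                                # matches the "oid sha256" field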
special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<pad>",
+     "<mask>",
+     "<bos>"
+   ],
+   "bos_token": "<bos>",
+   "mask_token": "<mask>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
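These four special tokens occupy ids 0-3 in the tokenizer vocabulary below. A quick check, assuming tokenizer.json is in the working directory:

from transformers import PreTrainedTokenizerFast

tok = PreTrainedTokenizerFast(
    tokenizer_file="tokenizer.json",
    bos_token="<bos>", pad_token="<pad>",
    mask_token="<mask>", unk_token="<unk>",
)
print(tok.convert_tokens_to_ids(["<unk>", "<pad>", "<mask>", "<bos>"]))  # [0, 1, 2, 3]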
tokenizer.json ADDED
@@ -0,0 +1,166 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "<unk>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "<pad>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "<mask>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "<bos>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "WhitespaceSplit"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "<bos>",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "<bos>": {
+         "id": "<bos>",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "<bos>"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "<unk>": 0,
+       "<pad>": 1,
+       "<mask>": 2,
+       "<bos>": 3,
+       ".": 4,
+       "the": 5,
+       "and": 6,
+       ",": 7,
+       "it": 8,
+       "had": 9,
+       "in": 10,
+       "was": 11,
+       "he": 12,
+       "here": 13,
+       "Bible": 14,
+       "n<apostrophe>t": 15,
+       "to": 16,
+       "He": 17,
+       "a": 18,
+       "boy": 19,
+       "But": 20,
+       "Oh": 21,
+       "knew": 22,
+       "<apostrophe>s": 23,
+       "moved": 24,
+       "none": 25,
+       "that": 26,
+       "Today": 27,
+       "finger": 28,
+       "help": 29,
+       "quite": 30,
+       "should": 31,
+       "justice": 32,
+       "really": 33,
+       "then": 34,
+       "told": 35,
+       "virtue": 36,
+       "by": 37,
+       "did": 38,
+       "ditch": 39,
+       "meant": 40,
+       "now": 41,
+       "of": 42,
+       "care": 43,
+       "finally": 44,
+       "find": 45,
+       "mattered": 46,
+       "them": 47,
+       "world": 48,
+       ":": 49,
+       "Tuesday": 50,
+       "happened": 51,
+       "intended": 52,
+       "not": 53,
+       "they": 54,
+       "what": 55,
+       "And": 56,
+       "They": 57,
+       "class": 58,
+       "leave": 59,
+       "rising": 60,
+       "saw": 61,
+       "Because": 62,
+       "like": 63,
+       "lip": 64,
+       "narrowed": 65,
+       "sure": 66,
+       "there": 67,
+       "cows": 68,
+       "quivered": 69
+     },
+     "unk_token": "<unk>"
+   }
+ }
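The tokenizer is a plain word-level model over a 70-entry vocabulary: input is split on whitespace, unknown words map to <unk>, and a TemplateProcessing post-processor prepends <bos> to every single sequence. A sketch of how an equivalent tokenizer could be built with the tokenizers library (the vocab is truncated here; the file above lists all 70 entries):

from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import WhitespaceSplit
from tokenizers.processors import TemplateProcessing

vocab = {"<unk>": 0, "<pad>": 1, "<mask>": 2, "<bos>": 3, ".": 4, "the": 5}  # truncated
tokenizer = Tokenizer(WordLevel(vocab, unk_token="<unk>"))
tokenizer.pre_tokenizer = WhitespaceSplit()
tokenizer.post_processor = TemplateProcessing(
    single="<bos> $A",
    pair="$A $B:1",
    special_tokens=[("<bos>", 3)],
)
print(tokenizer.encode("the .").ids)  # [3, 5, 4]: <bos> is prepended automatically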
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<pad>",
+     "<mask>",
+     "<bos>"
+   ],
+   "bos_token": "<bos>",
+   "clean_up_tokenization_spaces": true,
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "<unk>"
+ }
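With all of the files above in one directory, the tokenizer loads through the standard API. A minimal usage sketch (the local path is illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./step-1-checkpoint")  # hypothetical local dir
ids = tok("he saw the cows .").input_ids
print(ids)              # [3, 12, 61, 5, 68, 4], per the vocab above
print(tok.decode(ids))  # "<bos> he saw the cows ."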
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06c3edcf4027dd0552bcd8a57cbd2eb4e8d4aaa09f3f0e4b8ec6cf52bf14d4fe
+ size 4408
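training_args.bin is another LFS pointer; the blob behind it is a pickled TrainingArguments object, so unpickling it needs a compatible transformers install (4.30.x here). A hedged sketch:

import torch

# weights_only=False because this is a pickled Python object, not a tensor file
# (drop the kwarg on torch versions older than 1.13)
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__, args.output_dir)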