Upload tokenizer
- tokenizer.json +2 -2
- tokenizer_config.json +7 -0
tokenizer.json
CHANGED
@@ -2,13 +2,13 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length":
+    "max_length": 71,
     "strategy": "LongestFirst",
     "stride": 0
   },
   "padding": {
     "strategy": {
-      "Fixed":
+      "Fixed": 71
     },
     "direction": "Right",
     "pad_to_multiple_of": null,
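With the updated tokenizer.json, the truncation limit and Fixed padding length of 71 travel with the fast tokenizer itself, so anything that loads the file gets 71-token encodings without extra arguments. A minimal sketch using the tokenizers library, assuming the file has been downloaded to the working directory (the input strings are only illustrative):

from tokenizers import Tokenizer

# Load the fast tokenizer directly from the updated tokenizer.json.
# The truncation (max_length=71) and Fixed(71) padding settings are
# restored from the file, so every encoding comes back 71 tokens long.
tok = Tokenizer.from_file("tokenizer.json")

short = tok.encode("a short example sentence")
long = tok.encode("word " * 200)
print(len(short.ids))  # 71: short inputs are right-padded up to 71
print(len(long.ids))   # 71: long inputs are right-truncated to 71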
tokenizer_config.json
CHANGED
@@ -109,8 +109,15 @@
   "errors": "replace",
   "extra_special_tokens": {},
   "full_tokenizer_file": null,
+  "max_length": 146,
   "model_max_length": 1000000000000000019884624838656,
+  "pad_to_multiple_of": null,
   "pad_token": "<|endoftext|>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "stride": 0,
   "tokenizer_class": "GPT2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "<|endoftext|>"
 }
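The seven new keys in tokenizer_config.json record the padding and truncation settings (sides, stride, strategy, target lengths) alongside the existing GPT2Tokenizer config. A rough sketch of how they surface after loading through transformers, assuming the files are published under a hypothetical repo id your-org/your-model; padding to the recorded 146 is still requested explicitly at call time here:

from transformers import AutoTokenizer

# "your-org/your-model" is a placeholder for wherever these files live.
tok = AutoTokenizer.from_pretrained("your-org/your-model")

print(tok.padding_side)     # "right", from the new "padding_side" key
print(tok.truncation_side)  # "right", from the new "truncation_side" key

# Pad/truncate to the length recorded in the config (146) at call time.
batch = tok("a short example", padding="max_length", truncation=True, max_length=146)
print(len(batch["input_ids"]))  # 146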