Upload tokenizer
Files changed:
- tokenizer.json +2 -2
- tokenizer_config.json +0 -7
tokenizer.json CHANGED
@@ -2,13 +2,13 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length":
+    "max_length": 70,
     "strategy": "LongestFirst",
     "stride": 0
   },
   "padding": {
     "strategy": {
-      "Fixed":
+      "Fixed": 70
     },
     "direction": "Right",
     "pad_to_multiple_of": null,
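The truncation and padding settings added above can be reproduced with the tokenizers library before re-saving tokenizer.json. A minimal sketch, assuming a local tokenizer.json and taking only the fixed length 70 and the <|endoftext|> pad token from this diff:

from tokenizers import Tokenizer

# Load the serialized fast tokenizer shipped with the repo.
tok = Tokenizer.from_file("tokenizer.json")

# Right-side truncation to 70 tokens, as in the "truncation" block above.
tok.enable_truncation(max_length=70, stride=0,
                      strategy="longest_first", direction="right")

# Fixed-length right padding to 70 tokens using <|endoftext|> as the pad token;
# this serializes as {"strategy": {"Fixed": 70}, ...} in the "padding" block.
tok.enable_padding(length=70, direction="right",
                   pad_token="<|endoftext|>",
                   pad_id=tok.token_to_id("<|endoftext|>"))

# Write the settings back into tokenizer.json.
tok.save("tokenizer.json")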
tokenizer_config.json CHANGED
@@ -20,9 +20,7 @@
   },
   "errors": "replace",
   "full_tokenizer_file": null,
-  "max_length": 32,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_to_multiple_of": null,
   "pad_token": {
     "__type": "AddedToken",
     "content": "<|endoftext|>",
@@ -31,12 +29,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token_type_id": 0,
-  "padding_side": "right",
-  "stride": 0,
   "tokenizer_class": "GPT2Tokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": {
     "__type": "AddedToken",
     "content": "<|endoftext|>",
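With the per-call keys (max_length, pad_to_multiple_of, padding_side, stride, truncation_side, truncation_strategy) removed from tokenizer_config.json, padding and truncation are requested explicitly at encode time instead. A minimal sketch, assuming a hypothetical repo id and a checkpoint that ships the files above:

from transformers import AutoTokenizer

# Hypothetical repo id; any checkpoint carrying the files above behaves the same.
tok = AutoTokenizer.from_pretrained("your-username/your-model")

# The pad token is still defined in tokenizer_config.json (<|endoftext|>),
# so fixed-length padding and truncation can be asked for per call.
batch = tok(["a short example", "a somewhat longer example sentence"],
            padding="max_length", truncation=True, max_length=70)

print([len(ids) for ids in batch["input_ids"]])  # [70, 70]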