Upload tokenizer files (vocab, config, README)
tokenizer.json (+7 -7)
@@ -52,6 +52,12 @@
     "type": "WordPiece",
     "unk_token": "<unk>"
   },
+  "special": [
+    0,
+    1,
+    2,
+    3
+  ],
   "model": {
     "type": "WordLevel",
     "vocab": {
@@ -1056,12 +1062,6 @@
       "levels": 998,
       "Australia": 999
     },
-    "unk_token": "<unk>"
-    "special": [
-      0,
-      1,
-      2,
-      3
-    ]
+    "unk_token": "<unk>"
   }
 }
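As a quick sanity check after an edit like this, the updated tokenizer.json can be reloaded to confirm it still parses and loads. A minimal sketch, assuming Python with the Hugging Face tokenizers package installed and tokenizer.json in the working directory:

import json
from tokenizers import Tokenizer

# Confirm the edited file is still valid JSON.
with open("tokenizer.json", encoding="utf-8") as f:
    json.load(f)

# Load it as a tokenizer and run a trivial encode using words that appear
# in the vocab shown in this diff ("levels" -> 998, "Australia" -> 999).
# The exact ids returned depend on the rest of the config (pre-tokenizer,
# normalizer, etc.), so treat this as a smoke test, not a spec.
tok = Tokenizer.from_file("tokenizer.json")
enc = tok.encode("levels Australia")
print(enc.tokens, enc.ids)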