Upload tokenizer implementation
tokenizer.py  CHANGED  (+2 -2)
@@ -16,7 +16,7 @@ WAVELET_TOKENIZER_CONFIG = {
     "model_type": "wavelet",
     "tokenizer_class": "WaveletTokenizer",
     "auto_map": {
-        "AutoTokenizer": ["
+        "AutoTokenizer": ["tokenizer.WaveletTokenizer", None]
     }
 }

@@ -39,7 +39,7 @@ class WaveletTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         self.auto_map = {
-            "AutoTokenizer": ["
+            "AutoTokenizer": ["tokenizer.WaveletTokenizer", None]
         }

         # Set vocab size first
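
Note: the auto_map value follows the Transformers convention of ["module.SlowTokenizerClass", fast tokenizer class or None], so "tokenizer.WaveletTokenizer" points at the WaveletTokenizer class defined in this repo's tokenizer.py, and None indicates that no fast (Rust-backed) tokenizer is provided. A minimal loading sketch under that assumption, with a hypothetical placeholder repo id (not taken from this commit):

from transformers import AutoTokenizer

# With trust_remote_code=True, AutoTokenizer reads "auto_map" from the repo's
# tokenizer config, imports the tokenizer.py shipped alongside it, and returns
# an instance of WaveletTokenizer.
# "your-username/wavelet-model" is a hypothetical placeholder repo id.
tok = AutoTokenizer.from_pretrained(
    "your-username/wavelet-model",
    trust_remote_code=True,
)
print(type(tok).__name__)  # expected: WaveletTokenizer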