Shengkun committed (verified)
Commit 00dd310 · 1 parent: 5c80d11

Upload tokenizer

special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
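For reference, the flags above map one-to-one onto the fields of `transformers.AddedToken`. A minimal illustrative sketch (not part of the upload; the values simply mirror special_tokens_map.json):

```python
# Illustrative only: each special token in special_tokens_map.json corresponds
# to an AddedToken with the same lstrip/rstrip/normalized/single_word flags.
from transformers import AddedToken

bos_token = AddedToken("<s>", lstrip=False, rstrip=False, normalized=False, single_word=False)
eos_token = AddedToken("</s>", lstrip=False, rstrip=False, normalized=False, single_word=False)
unk_token = AddedToken("<unk>", lstrip=False, rstrip=False, normalized=False, single_word=False)
print(bos_token.content, eos_token.content, unk_token.content)  # <s> </s> <unk>
```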
tokenization_llama.py ADDED
@@ -0,0 +1,413 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tokenization classes for LLaMA."""
+
+import os
+from shutil import copyfile
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.convert_slow_tokenizer import import_protobuf
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+if TYPE_CHECKING:
+    from transformers.tokenization_utils_base import TextInput
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+SPIECE_UNDERLINE = "▁"
+
+B_INST, E_INST = "[INST]", "[/INST]"
+B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+
+DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
+answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
+ that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
+correct. If you don't know the answer to a question, please don't share false information.""" # fmt: skip
+
+
+class LlamaTokenizer(PreTrainedTokenizer):
+    """
+    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
+    no padding token in the original model.
+
+    Args:
+        vocab_file (`str`):
+            Path to the vocabulary file.
+        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
+        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
+            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
+            attention mechanisms or loss computation.
+        sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
+            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+            to set:
+
+            - `enable_sampling`: Enable subword regularization.
+            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+              - `nbest_size = {0,1}`: No sampling is performed.
+              - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+                using forward-filtering-and-backward-sampling algorithm.
+
+            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+              BPE-dropout.
+
+        add_bos_token (`bool`, *optional*, defaults to `True`):
+            Whether or not to add an `bos_token` at the start of sequences.
+        add_eos_token (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an `eos_token` at the end of sequences.
+        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
+            extra spaces.
+        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
+            Whether or not the default system prompt for Llama should be used.
+        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
+            Whether or not to add spaces between special tokens.
+        legacy (`bool`, *optional*):
+            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
+            and #25224 which includes fixes to properly handle tokens that appear after special tokens.
+            Make sure to also set `from_slow` to `True`.
+            A simple example:
+
+            - `legacy=True`:
+            ```python
+            >>> from transformers import LlamaTokenizerFast
+
+            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
+            >>> tokenizer.encode("Hello <s>.") # 869 is '▁.'
+            [1, 15043, 29871, 1, 869]
+            ```
+            - `legacy=False`:
+            ```python
+            >>> from transformers import LlamaTokenizerFast
+
+            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
+            >>> tokenizer.encode("Hello <s>.") # 29889 is '.'
+            [1, 15043, 29871, 1, 29889]
+            ```
+            Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
+        add_prefix_space (`bool`, *optional*, defaults to `True`):
+            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
+            other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token=None,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        clean_up_tokenization_spaces=False,
+        use_default_system_prompt=False,
+        spaces_between_special_tokens=False,
+        legacy=None,
+        add_prefix_space=True,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
+        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
+        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
+        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
+
+        if legacy is None:
+            logger.warning_once(
+                f"You are using the default legacy behaviour of the {self.__class__}. This is"
+                " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
+                " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
+                " means, and thoroughly read the reason why this was added as explained in"
+                " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file"
+                " you can ignore this message"
+            )
+            legacy = True
+
+        self.legacy = legacy
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.use_default_system_prompt = use_default_system_prompt
+        self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
+        self.add_prefix_space = add_prefix_space
+
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            use_default_system_prompt=use_default_system_prompt,
+            spaces_between_special_tokens=spaces_between_special_tokens,
+            legacy=legacy,
+            add_prefix_space=add_prefix_space,
+            **kwargs,
+        )
+
+    @property
+    def unk_token_length(self):
+        return len(self.sp_model.encode(str(self.unk_token)))
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
+    def get_spm_processor(self, from_slow=False):
+        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        if self.legacy or from_slow:  # no dependency on protobuf
+            tokenizer.Load(self.vocab_file)
+            return tokenizer
+
+        with open(self.vocab_file, "rb") as f:
+            sp_model = f.read()
+            model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
+            model = model_pb2.ModelProto.FromString(sp_model)
+            normalizer_spec = model_pb2.NormalizerSpec()
+            normalizer_spec.add_dummy_prefix = False
+            model.normalizer_spec.MergeFrom(normalizer_spec)
+            sp_model = model.SerializeToString()
+            tokenizer.LoadFromSerializedProto(sp_model)
+        return tokenizer
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+        return state
+
+    def __setstate__(self, d):
+        self.__dict__.update(d)
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+    @property
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
+
+    def get_vocab(self):
+        """Returns vocab as a dict"""
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
+    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
+        """
+        Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
+        first token is special.
+        """
+        if self.legacy or len(text) == 0:
+            return super().tokenize(text, **kwargs)
+
+        text = text.replace(SPIECE_UNDERLINE, " ")
+        if self.add_prefix_space:
+            text = SPIECE_UNDERLINE + text
+
+        tokens = super().tokenize(text, **kwargs)
+
+        if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
+            tokens = tokens[1:]
+        return tokens
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
+    def _tokenize(self, text, **kwargs):
+        """
+        Returns a tokenized string.
+
+        We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
+        SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
+        `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
+        `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
+        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
+        """
+        if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
+            return self.sp_model.encode(text, out_type=str)
+
+        # 1. Encode string + prefix ex: "<unk> Hey"
+        tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
+        # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
+        return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) in an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) in a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) in a single string."""
+        # since we manually add the prefix space, we have to remove it when decoding
+        if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
+            tokens[0] = tokens[0][1:]
+
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for i, token in enumerate(tokens):
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special and i != 0 and self.legacy:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                if prev_is_special and i == 1 and self.add_prefix_space and not token.startswith(SPIECE_UNDERLINE):
+                    out_string += " "
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string
+
+    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple(str)`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = bos_token_id + token_ids_0 + eos_token_id
+
+        if token_ids_1 is not None:
+            output = output + bos_token_id + token_ids_1 + eos_token_id
+
+        return output
+
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+            )
+
+        bos_token_id = [1] if self.add_bos_token else []
+        eos_token_id = [1] if self.add_eos_token else []
+
+        if token_ids_1 is None:
+            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+        return (
+            bos_token_id
+            + ([0] * len(token_ids_0))
+            + eos_token_id
+            + bos_token_id
+            + ([0] * len(token_ids_1))
+            + eos_token_id
+        )
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+        if token_ids_1 is not None:
+            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+        return output
+
+
+__all__ = ["LlamaTokenizer"]
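The `_tokenize` docstring above describes the non-legacy trick: encode `unk_token + text` and then strip the unk pieces again, so that SentencePiece cannot silently drop a leading `SPIECE_UNDERLINE`/space. A hedged sketch of the same idea on a raw SentencePiece model (it assumes the `tokenizer.model` from this commit is in the working directory; the exact pieces can differ from the in-class behaviour because the raw model still has `add_dummy_prefix` enabled):

```python
# Sketch of the unk-prefix trick from LlamaTokenizer._tokenize, on a raw model.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("tokenizer.model")  # the SentencePiece model uploaded in this commit

unk_token = "<unk>"
unk_token_length = len(sp.encode(unk_token, out_type=str))  # number of pieces the unk token splits into

# Encode the text behind the unk token, then drop the unk pieces again.
pieces = sp.encode(unk_token + " Hey", out_type=str)
print(pieces[unk_token_length:])
```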
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenization_llama.LlamaTokenizer",
+      null
+    ]
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
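Because `auto_map` points `AutoTokenizer` at the `tokenization_llama.LlamaTokenizer` shipped in this repo (with no fast counterpart registered), loading goes through the custom code above. A hedged loading sketch; the repo id below is a placeholder:

```python
# "<namespace>/<repo>" is a placeholder for this repository's id.
# trust_remote_code=True is required so the bundled tokenization_llama.py is used.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("<namespace>/<repo>", trust_remote_code=True, use_fast=False)
print(type(tok).__name__)                            # LlamaTokenizer (the custom class)
print(tok.bos_token, tok.eos_token, tok.unk_token)   # <s> </s> <unk>
print(tok.add_bos_token, tok.add_eos_token)          # True False, per this config
```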