JinghuiLuAstronaut committed on
Commit 0899505 · verified · 1 Parent(s): 7e843ce

Upload tokenizer

special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
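
For reference, a minimal sketch of how these entries surface once the tokenizer is loaded. The local path is a placeholder for a checkout of this repository, and `trust_remote_code=True` is needed because the tokenizer class ships in `tokenization_baichuan.py` below; the token ids in the comments come from `added_tokens_decoder` in `tokenizer_config.json`.

```python
from transformers import AutoTokenizer

# "./baichuan-tokenizer" is a hypothetical local checkout of this repo.
tokenizer = AutoTokenizer.from_pretrained(
    "./baichuan-tokenizer",
    trust_remote_code=True,  # tokenizer_class is defined in tokenization_baichuan.py
    use_fast=False,
)

print(tokenizer.bos_token, tokenizer.bos_token_id)  # <s> 1
print(tokenizer.eos_token, tokenizer.eos_token_id)  # </s> 2
print(tokenizer.pad_token, tokenizer.pad_token_id)  # <unk> 0 (reused as padding)
```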
tokenization_baichuan.py ADDED
@@ -0,0 +1,252 @@
+ # Copyright 2023 Baichuan Inc. All Rights Reserved.
+
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {
+     "vocab_file": {},
+     "tokenizer_file": {},
+ }
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+ class BaichuanTokenizer(PreTrainedTokenizer):
+     """
+     Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<s>",
+         eos_token="</s>",
+         pad_token=None,
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+         pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             add_bos_token=add_bos_token,
+             add_eos_token=add_eos_token,
+             sp_model_kwargs=self.sp_model_kwargs,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.vocab_file)
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size"""
+         return self.sp_model.get_piece_size()
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) to a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for i, token in enumerate(tokens):
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special and i != 0:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return (
+             bos_token_id
+             + ([0] * len(token_ids_0))
+             + eos_token_id
+             + bos_token_id
+             + ([0] * len(token_ids_1))
+             + eos_token_id
+         )
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence
+         pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence    | second sequence |
+         ```
+
+         If `token_ids_1` is `None`, only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of ids.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+         return output
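
A short usage sketch of the class above, instantiated directly from the SentencePiece model file shipped in this commit. It assumes you run from a checkout containing both `tokenization_baichuan.py` and `tokenizer.model`; the sample text is illustrative only.

```python
from tokenization_baichuan import BaichuanTokenizer

tok = BaichuanTokenizer(vocab_file="tokenizer.model")

# add_bos_token=True is the class default, so encode() prepends <s>;
# add_eos_token=False means no </s> is appended.
ids = tok.encode("hello world")
assert ids[0] == tok.bos_token_id

# decode() routes special tokens around the SentencePiece model,
# as implemented in convert_tokens_to_string above.
text = tok.decode(ids, skip_special_tokens=True)
print(text)
```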
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+ size 2001107
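
The three lines above are a Git LFS pointer, not the SentencePiece model itself; after `git lfs pull`, the downloaded file can be checked against the size and digest recorded in the pointer. A sketch:

```python
import hashlib
import os

# Verify the fetched tokenizer.model against the LFS pointer above.
with open("tokenizer.model", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize("tokenizer.model") == 2001107
assert digest == "79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2"
```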
tokenizer_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_baichuan.BaichuanTokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "model_max_length": 4096,
+   "pad_token": "<unk>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "BaichuanTokenizer",
+   "unk_token": "<unk>",
+   "use_fast": false
+ }
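
This config pins `padding_side` to `"left"` and reuses `<unk>` (id 0) as the pad token, the usual arrangement for batched decoder-only generation: real tokens end up flush against the position where generation continues. A minimal sketch, reusing the `tokenizer` loaded via `AutoTokenizer` in the first example above; the prompts are illustrative only.

```python
batch = tokenizer(
    ["short", "a somewhat longer prompt"],
    padding=True,
    return_tensors="pt",
)

# With padding_side="left", pad ids (0, i.e. "<unk>") are prepended to the
# shorter row, and attention_mask zeros them out.
print(batch["input_ids"])
print(batch["attention_mask"])
```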