DACNTT2 committed
Commit ef82295 · 1 Parent(s): 5001557

Delete tokenization_phobert_fast.py

Files changed (1)
  1. tokenization_phobert_fast.py +0 -328
tokenization_phobert_fast.py DELETED
@@ -1,328 +0,0 @@
- # coding=utf-8
- # Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
- # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ Tokenization classes for PhoBERT"""
-
- import os
- from collections import defaultdict
- from shutil import copyfile
- from typing import Any, Dict, List, Optional, Tuple, Union
-
- from transformers.tokenization_utils_base import EncodingFast
-
- from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
- from transformers.utils import logging
- from .tokenization_phobert import PhobertTokenizer
-
-
- logger = logging.get_logger(__name__)
-
- VOCAB_FILES_NAMES = {
-     "vocab_file": "vocab.txt",
-     "merges_file": "bpe.codes",
-     "tokenizer_file": "tokenizer.json",
- }
-
- PRETRAINED_VOCAB_FILES_MAP = {
-     "vocab_file": {
-         "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
-         "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
-     },
-     "merges_file": {
-         "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
-         "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
-     },
-     "tokenizer_file": {
-         "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/tokenizer.json",
-         "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/tokenizer.json",
-     },
- }
-
- PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
-     "vinai/phobert-base": 256,
-     "vinai/phobert-large": 256,
- }
-
-
- class PhobertTokenizerFast(PreTrainedTokenizerFast):
-     """
-     Construct a "Fast" BPE tokenizer for PhoBERT (backed by HuggingFace's *tokenizers* library).
-
-     Peculiarities:
-
-     - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
-       a punctuation character will be treated separately.
-
-     This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the methods. Users should refer to the
-     superclass for more information regarding methods.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-         merges_file (`str`):
-             Path to the merges file.
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
-     model_input_names = ["input_ids", "attention_mask"]
-     slow_tokenizer_class = PhobertTokenizer
-
-     def __init__(
-         self,
-         vocab_file=None,
-         merges_file=None,
-         tokenizer_file=None,
-         bos_token="<s>",
-         eos_token="</s>",
-         sep_token="</s>",
-         cls_token="<s>",
-         unk_token="<unk>",
-         pad_token="<pad>",
-         mask_token="<mask>",
-         **kwargs
-     ):
-         super().__init__(
-             vocab_file,
-             merges_file,
-             tokenizer_file=tokenizer_file,
-             bos_token=bos_token,
-             eos_token=eos_token,
-             sep_token=sep_token,
-             cls_token=cls_token,
-             unk_token=unk_token,
-             pad_token=pad_token,
-             mask_token=mask_token,
-             **kwargs,
-         )
-
-         self.vocab_file = vocab_file
-         self.merges_file = merges_file
-         self.can_save_slow_tokenizer = False if not self.vocab_file else True
-
-     def get_added_vocab_hacking(self):
-         """
-         Returns the added tokens in the vocabulary as a dictionary of token to index.
-
-         Returns:
-             `Dict[str, int], Dict[int, int]`: The added tokens, and their original and new ids
-         """
-         base_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=False)
-         full_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=True)
-         if full_vocab_size == base_vocab_size:
-             return {}, {}
-
-         # Tokens in added_vocab should have ids that are equal to or larger than the size of base_vocab
-         added_vocab = dict(
-             (self._tokenizer.id_to_token(index), index + 1 - base_vocab_size + self.mask_token_id)
-             for index in range(base_vocab_size, full_vocab_size)
-         )
-
-         id_mapping = dict((index, self._tokenizer.token_to_id(tok)) for tok, index in added_vocab.items())
-
-         return added_vocab, id_mapping
-
-     def _decode(
-         self,
-         token_ids: Union[int, List[int]],
-         skip_special_tokens: bool = False,
-         clean_up_tokenization_spaces: bool = True,
-         **kwargs
-     ) -> str:
-         self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
-
-         if isinstance(token_ids, int):
-             token_ids = [token_ids]
-
-         # Mapping ids into their original values
-         _, id_mapping = self.get_added_vocab_hacking()
-         if len(id_mapping) > 0:
-             token_ids = [id_mapping[id] if id in id_mapping else id for id in token_ids]
-
-         text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
-
-         if clean_up_tokenization_spaces:
-             clean_text = self.clean_up_tokenization(text)
-             return clean_text
-         else:
-             return text
-
-     def _convert_encoding(
-         self,
-         encoding: EncodingFast,
-         return_token_type_ids: Optional[bool] = None,
-         return_attention_mask: Optional[bool] = None,
-         return_overflowing_tokens: bool = False,
-         return_special_tokens_mask: bool = False,
-         return_offsets_mapping: bool = False,
-         return_length: bool = False,
-         verbose: bool = True,
-     ) -> Tuple[Dict[str, Any], List[EncodingFast]]:
-         """
-         Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list
-         of encodings, take care of building a batch from overflowing tokens.
-
-         Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are
-         lists (overflows) of lists (tokens).
-
-         Output shape: (overflows, sequence length)
-         """
-         if return_token_type_ids is None:
-             return_token_type_ids = "token_type_ids" in self.model_input_names
-         if return_attention_mask is None:
-             return_attention_mask = "attention_mask" in self.model_input_names
-
-         if return_overflowing_tokens and encoding.overflowing is not None:
-             encodings = [encoding] + encoding.overflowing
-         else:
-             encodings = [encoding]
-
-         encoding_dict = defaultdict(list)
-         added_vocab, _ = self.get_added_vocab_hacking()
-         for e in encodings:
-             # encoding_dict["input_ids"].append(e.ids)
-             # Reassign ids of tokens due to the hacking strategy
-             ids = []
-             for id, token in zip(e.ids, e.tokens):
-                 if id <= self.mask_token_id:
-                     ids.append(id)
-                 else:
-                     if token.strip() in added_vocab:
-                         ids.append(added_vocab[token.strip()])
-                     else:
-                         ids.append(self.unk_token_id)
-
-             encoding_dict["input_ids"].append(ids)
-
-             if return_token_type_ids:
-                 encoding_dict["token_type_ids"].append(e.type_ids)
-             if return_attention_mask:
-                 encoding_dict["attention_mask"].append(e.attention_mask)
-             if return_special_tokens_mask:
-                 encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
-             if return_offsets_mapping:
-                 encoding_dict["offset_mapping"].append(e.offsets)
-             if return_length:
-                 # encoding_dict["length"].append(len(e.ids))
-                 encoding_dict["length"].append(len(ids))
-
-         return encoding_dict, encodings
-
-     def build_inputs_with_special_tokens(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-     ) -> List[int]:
-         """
-         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
-         adding special tokens. A PhoBERT sequence has the following format:
-
-         - single sequence: `<s> X </s>`
-         - pair of sequences: `<s> A </s></s> B </s>`
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs to which the special tokens will be added.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-
-         Returns:
-             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
-         """
-
-         if token_ids_1 is None:
-             return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
-         cls = [self.cls_token_id]
-         sep = [self.sep_token_id]
-         return cls + token_ids_0 + sep + sep + token_ids_1 + sep
-
-     def get_special_tokens_mask(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
-     ) -> List[int]:
-         """
-         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
-         special tokens using the tokenizer `prepare_for_model` method.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                 Whether or not the token list is already formatted with special tokens for the model.
-
-         Returns:
-             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-         """
-
-         if already_has_special_tokens:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
-             )
-
-         if token_ids_1 is None:
-             return [1] + ([0] * len(token_ids_0)) + [1]
-         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
-
-     def create_token_type_ids_from_sequences(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-     ) -> List[int]:
-         """
-         Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not
-         make use of token type ids, therefore a list of zeros is returned.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-
-         Returns:
-             `List[int]`: List of zeros.
-
-         """
-
-         sep = [self.sep_token_id]
-         cls = [self.cls_token_id]
-
-         if token_ids_1 is None:
-             return len(cls + token_ids_0 + sep) * [0]
-         return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
-
-     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
-         if not self.can_save_slow_tokenizer:
-             raise ValueError(
-                 "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
-                 "tokenizer."
-             )
-
-         if not os.path.isdir(save_directory):
-             logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
-             return
-
-         out_vocab_file = os.path.join(
-             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
-         )
-
-         out_merges_file = os.path.join(
-             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
-         )
-
-         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
-             copyfile(self.vocab_file, out_vocab_file)
-
-         if os.path.abspath(self.merges_file) != os.path.abspath(out_merges_file):
-             copyfile(self.merges_file, out_merges_file)
-
-         return (out_vocab_file, out_merges_file)
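
For reference, below is a minimal usage sketch (not part of this commit) of how the PhobertTokenizerFast class removed above would typically have been loaded. The repo id is only illustrative, and the sketch assumes the repository's tokenizer_config.json mapped to this class through an auto_map entry, with vocab.txt, bpe.codes, and tokenizer.json present in the repo.

# Hypothetical usage sketch for the deleted PhobertTokenizerFast class.
# Assumption: tokenizer_config.json pointed to this class via "auto_map", so
# trust_remote_code=True is needed to load tokenizer code shipped inside the repo.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base", trust_remote_code=True)

# PhoBERT expects word-segmented Vietnamese input (e.g. produced by VnCoreNLP's RDRSegmenter).
encoding = tokenizer("Tôi là sinh_viên trường đại_học Công_nghệ .")
print(encoding["input_ids"])  # ids wrapped as <s> ... </s> by build_inputs_with_special_tokens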