Create Tokenizer2.py
Tokenizer2.py +358 -0
ADDED
@@ -0,0 +1,358 @@
"""
This script defines a custom tokenizer, `SupplyChainTokenizer`, designed for a
collaborative predictive supply chain model built on a Transformer architecture.
It loads a custom, industry-specific vocabulary (from `vocab.json`) to prioritize
domain-relevant tokens (SKUs, store IDs, plant IDs, promotion types, etc.) and
uses Byte-Pair Encoding (BPE) to handle out-of-vocabulary words and variations.

The script also includes an example usage section demonstrating how to create,
train, use, save, and load the tokenizer. The tokenizer bridges the gap between
raw supply chain data and a Transformer-based forecasting model.
"""
import json
import os
from typing import List, Tuple, Union

from tokenizers import (
    Tokenizer,
    models,
    normalizers,
    pre_tokenizers,
    decoders,
    trainers,
    processors,
)
from tokenizers.pre_tokenizers import WhitespaceSplit, Digits
import pandas as pd


class SupplyChainTokenizer:
    """
    A custom tokenizer designed for the Enhanced Business Model for Collaborative
    Predictive Supply Chain. It prioritizes industry-specific tokens from a
    `vocab.json` file and uses Byte-Pair Encoding (BPE) for out-of-vocabulary
    (OOV) words. It handles the various data types expected in supply chain data.

    Args:
        vocab_path (str): Path to the `vocab.json` file.
        max_length (int, optional): Maximum sequence length. Defaults to 512.
    """

    def __init__(self, vocab_path: str, max_length: int = 512):
        if not os.path.exists(vocab_path):
            raise FileNotFoundError(f"Vocabulary file not found: {vocab_path}")

        self.vocab_path = vocab_path
        self.max_length = max_length

        # Load the custom vocabulary. It must contain the special tokens
        # [UNK], [CLS], [SEP], and [PAD] used below.
        with open(self.vocab_path, "r", encoding="utf-8") as f:
            self.vocab = json.load(f)

        # 1. Create the BPE model, initialized with the custom vocabulary.
        #    Merges are learned later via `train_bpe`.
        self.bpe_model = models.BPE(
            vocab=self.vocab,
            merges=[],
            unk_token="[UNK]",
        )

        # 2. Create a Tokenizer instance
        self.tokenizer = Tokenizer(self.bpe_model)

        # 3. Normalization (Unicode normalization, lowercasing, accent stripping).
        #    Note: because of lowercasing, entries in vocab.json should also be
        #    lowercase if they are expected to match the normalized text.
        self.tokenizer.normalizer = normalizers.Sequence(
            [normalizers.NFD(), normalizers.Lowercase(), normalizers.StripAccents()]
        )

        # 4. Pre-tokenization (split on whitespace; digits as individual tokens)
        self.tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [WhitespaceSplit(), Digits(individual_digits=True)]
        )

        # 5. Decoder (convert token IDs back to strings)
        self.tokenizer.decoder = decoders.BPEDecoder()

        # 6. Post-processing (wrap sequences with the [CLS] / [SEP] special tokens)
        self.tokenizer.post_processor = processors.TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[("[CLS]", self.vocab["[CLS]"]), ("[SEP]", self.vocab["[SEP]"])],
        )

        # Cache the padding token ID; used by `prepare_for_model`.
        self.pad_token_id = self.vocab["[PAD]"]

    def train_bpe(self, files: Union[str, List[str]], vocab_size: int = 30000):
        """
        Trains the BPE model on text files, learning the merge rules needed to
        handle words that are not in the initial `vocab.json`.

        Args:
            files (Union[str, List[str]]): Path(s) to text file(s) for training.
            vocab_size (int): The desired vocabulary size (including special
                tokens and the initial vocabulary).
        """
        if isinstance(files, str):
            files = [files]

        # Create a trainer
        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),  # all single bytes
            show_progress=True,
        )

        # Train the tokenizer. Training rebuilds the BPE model, so refresh the
        # cached vocabulary (and padding ID) to stay in sync with it.
        self.tokenizer.train(files, trainer=trainer)
        self.vocab = self.tokenizer.get_vocab()
        self.pad_token_id = self.vocab["[PAD]"]

    def encode(self, text: str, text_pair: str = None) -> List[str]:
        """
        Encodes text into a list of tokens.

        Args:
            text (str): The input text.
            text_pair (str, optional): An optional second input string.

        Returns:
            List[str]: A list of tokens.
        """
        encoded = self.tokenizer.encode(text, text_pair)
        return encoded.tokens

    def encode_as_ids(self, text: str, text_pair: str = None) -> List[int]:
        """
        Encodes text into a list of token IDs.

        Args:
            text (str): The input text.
            text_pair (str, optional): An optional second input string.

        Returns:
            List[int]: A list of token IDs.
        """
        encoded = self.tokenizer.encode(text, text_pair)
        return encoded.ids

    def decode(self, ids: List[int], skip_special_tokens: bool = True) -> str:
        """
        Decodes a list of token IDs back into a string.

        Args:
            ids (List[int]): The list of token IDs.
            skip_special_tokens (bool): Whether to skip special tokens when decoding.

        Returns:
            str: The decoded string.
        """
        return self.tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)

    def token_to_id(self, token: str) -> int:
        """
        Converts a token to its corresponding ID.

        Args:
            token (str): The token.

        Returns:
            int: The token ID, or the [UNK] token ID if the token is not in
                the vocabulary.
        """
        return self.vocab.get(token, self.vocab.get("[UNK]"))

    def id_to_token(self, id_: int) -> str:
        """
        Converts a token ID to its corresponding token.

        Args:
            id_ (int): The token ID.

        Returns:
            str: The token. Returns "[UNK]" if the ID is not in the vocabulary.
        """
        # Reverse lookup; this dict is rebuilt on every call, so cache it if
        # the method is used frequently.
        reverse_vocab = {v: k for k, v in self.vocab.items()}
        return reverse_vocab.get(id_, "[UNK]")

    def get_vocab_size(self) -> int:
        """Gets the vocabulary size."""
        return len(self.vocab)

    def save(self, directory: str, prefix: str = None):
        """
        Saves the tokenizer configuration and vocabulary to a directory.

        Args:
            directory (str): The directory to save to.
            prefix (str, optional): An optional prefix for the filenames.
        """
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save the tokenizer configuration
        self.tokenizer.save(os.path.join(directory, (prefix + "-" if prefix else "") + "tokenizer.json"))

        # Save a copy of the vocabulary (for easy access)
        with open(os.path.join(directory, (prefix + "-" if prefix else "") + "vocab.json"), "w", encoding="utf-8") as f:
            json.dump(self.vocab, f, ensure_ascii=False, indent=4)

    @staticmethod
    def from_pretrained(directory: str, prefix: str = None):
        """
        Loads a pre-trained tokenizer from a directory.

        Args:
            directory (str): The directory to load from.
            prefix (str, optional): The optional prefix used when saving.

        Returns:
            SupplyChainTokenizer: The loaded tokenizer.
        """
        vocab_path = os.path.join(directory, (prefix + "-" if prefix else "") + "vocab.json")

        # You could load the tokenizer.json alone, but since we have a custom
        # class with training logic, it's better to reconstruct the object this way.
        tokenizer = SupplyChainTokenizer(vocab_path)
        tokenizer.tokenizer = Tokenizer.from_file(os.path.join(directory, (prefix + "-" if prefix else "") + "tokenizer.json"))
        return tokenizer

    def prepare_for_model(self, data: pd.DataFrame) -> Tuple[List[List[int]], List[List[int]]]:
        """
        Prepares a Pandas DataFrame for the Transformer model. This is the key
        method that integrates the tokenizer with the data.

        Args:
            data (pd.DataFrame): The input DataFrame, expected to have columns
                such as 'timestamp', 'sku', 'store_id', 'quantity', 'price',
                'discount', 'promotion_id', etc. The exact columns depend on
                the features you're using.

        Returns:
            Tuple[List[List[int]], List[List[int]]]: A tuple of
                1. input_ids: List of token ID sequences for the model.
                2. attention_mask: List of attention masks (1 for real tokens, 0 for padding).
        """
        input_ids = []
        attention_masks = []

        for _, row in data.iterrows():
            # Build the input string. This is where you define *how* your
            # features are combined into a single sequence. [CLS] and [SEP]
            # are added by the post-processor, so they are not written here.
            input_string = (
                f"timestamp: {row['timestamp']} "
                f"sku: {row['sku']} store_id: {row['store_id']} "
                f"quantity: {row['quantity']} price: {row['price']} "
                f"discount: {row['discount']} "
            )
            # Add promotion information if available
            if 'promotion_id' in row and not pd.isna(row['promotion_id']):
                input_string += f"promotion_id: {row['promotion_id']} "
            # Add any other relevant (e.g., external) features here
            if 'product_category' in row:
                input_string += f"product_category: {row['product_category']} "

            # Tokenize
            encoded = self.tokenizer.encode(input_string)
            token_ids = encoded.ids
            attention_mask = encoded.attention_mask

            # Pad up to max_length, or truncate if the sequence is too long
            padding_length = self.max_length - len(token_ids)
            if padding_length > 0:
                token_ids += [self.pad_token_id] * padding_length
                attention_mask += [0] * padding_length
            elif padding_length < 0:  # Truncation
                token_ids = token_ids[:self.max_length]
                attention_mask = attention_mask[:self.max_length]

            input_ids.append(token_ids)
            attention_masks.append(attention_mask)

        return input_ids, attention_masks


# Example Usage (Illustrative)
if __name__ == "__main__":
    # --- Create a dummy vocab.json ---
    vocab = {
        "[UNK]": 0,
        "[CLS]": 1,
        "[SEP]": 2,
        "[PAD]": 3,
        "[MASK]": 4,
        "timestamp:": 5,
        "sku:": 6,
        "store_id:": 7,
        "quantity:": 8,
        "price:": 9,
        "discount:": 10,
        "promotion_id:": 11,
        "product_category:": 12,
        "SKU123": 13,          # Example SKU
        "SKU123-RED": 14,      # Example SKU variant
        "SKU123-BLUE": 15,
        "STORE456": 16,        # Example store ID
        "PLANT789": 17,        # Example plant ID
        "WHOLESALER001": 18,   # Example wholesaler
        "RETAILER002": 19,     # Example retailer
        "BOGO": 20,
        "DISCOUNT": 21,
    }
    with open("vocab.json", "w") as f:
        json.dump(vocab, f, indent=4)

    # --- Create the tokenizer ---
    tokenizer = SupplyChainTokenizer(vocab_path="vocab.json")

    # --- Example training (on a dummy text file) ---
    with open("training_data.txt", "w", encoding="utf-8") as f:
        f.write("This is some example text for training the BPE model.\n")
        f.write("SKU123 is a product. STORE456 is another. plant789 is, too.\n")
        f.write("This file contains words not in the initial vocabulary.\n")

    # Deliberately tiny vocab_size for the example; with a byte-level initial
    # alphabet and so little text, few (if any) merges will be learned.
    tokenizer.train_bpe("training_data.txt", vocab_size=50)

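    # Illustrative check: encode a made-up SKU string ("SKU999-GREEN") that is
    # not in vocab.json to see how the trained tokenizer handles
    # out-of-vocabulary input, and report the vocabulary size after training.
    oov_tokens = tokenizer.encode("sku: SKU999-GREEN")
    print(f"OOV example tokens: {oov_tokens}")
    print(f"Vocab size after BPE training: {tokenizer.get_vocab_size()}")
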
    # --- Example encoding ---
    text = "timestamp: 2024-07-03 sku: SKU123 store_id: STORE456 quantity: 2 price: 10.99 discount: 0.0"
    encoded_tokens = tokenizer.encode(text)
    encoded_ids = tokenizer.encode_as_ids(text)
    print(f"Encoded tokens: {encoded_tokens}")
    print(f"Encoded IDs: {encoded_ids}")

    decoded_text = tokenizer.decode(encoded_ids)
    print(f"Decoded text: {decoded_text}")

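    # Illustrative lookups: token_to_id falls back to the [UNK] ID for tokens
    # missing from the current vocabulary, and id_to_token falls back to
    # "[UNK]" for unknown IDs. 'never-seen-token' is a made-up value.
    print(f"ID for '[CLS]': {tokenizer.token_to_id('[CLS]')}")
    print(f"Token for ID 0: {tokenizer.id_to_token(0)}")
    print(f"ID for an unseen token: {tokenizer.token_to_id('never-seen-token')}")
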
    # --- Example with DataFrame ---
    data = {
        'timestamp': ['2024-07-03 10:00:00', '2024-07-03 11:00:00'],
        'sku': ['SKU123', 'SKU123-RED'],
        'store_id': ['STORE456', 'STORE456'],
        'quantity': [2, 1],
        'price': [10.99, 12.99],
        'discount': [0.0, 1.0],
        'promotion_id': ['BOGO', None],
        'product_category': ['Electronics', 'Electronics'],
    }
    df = pd.DataFrame(data)
    input_ids, attention_masks = tokenizer.prepare_for_model(df)
    print(f"Input IDs (for model): {input_ids}")
    print(f"Attention Masks: {attention_masks}")

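    # Minimal sketch of handing the prepared batch to a Transformer. This
    # assumes PyTorch is available; it is not a dependency of this script, so
    # the import is guarded.
    try:
        import torch

        input_ids_tensor = torch.tensor(input_ids, dtype=torch.long)
        attention_mask_tensor = torch.tensor(attention_masks, dtype=torch.long)
        # Both tensors have shape (batch_size, max_length) and would be passed
        # to a model as model(input_ids=..., attention_mask=...).
        print(f"Tensor shapes: {input_ids_tensor.shape}, {attention_mask_tensor.shape}")
    except ImportError:
        print("PyTorch not installed; skipping the tensor conversion sketch.")
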
    # --- Save and load ---
    tokenizer.save("my_tokenizer")
    loaded_tokenizer = SupplyChainTokenizer.from_pretrained("my_tokenizer")
    print(f"Loaded tokenizer vocab size: {loaded_tokenizer.get_vocab_size()}")

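    # Sanity check (illustrative): the reloaded tokenizer should produce the
    # same IDs as the in-memory one for the example text encoded above.
    reloaded_ids = loaded_tokenizer.encode_as_ids(text)
    print(f"Round-trip IDs match: {reloaded_ids == encoded_ids}")
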
    # Clean up example files
    os.remove("vocab.json")
    os.remove("training_data.txt")