"""
This script defines a custom tokenizer, `SupplyChainTokenizer`, specifically designed
for a collaborative predictive supply chain model built on a Transformer-based
architecture. It leverages a custom, industry-specific vocabulary (loaded from
`vocab.json`) to prioritize domain-relevant tokens (SKUs, store IDs, plant IDs,
promotion types, etc.) while employing Byte-Pair Encoding (BPE) to handle
out-of-vocabulary words and variations.

The script also includes a comprehensive example usage section demonstrating
how to create, train, use, save, and load the tokenizer. This tokenizer is a
critical component for bridging the gap between raw supply chain data and
a Transformer-based forecasting model.
"""

import json
import os
from typing import List, Tuple, Union

import pandas as pd
from tokenizers import (
    Tokenizer,
    models,
    normalizers,
    pre_tokenizers,
    decoders,
    trainers,
    processors,
)
from tokenizers.pre_tokenizers import WhitespaceSplit, Digits


class SupplyChainTokenizer:
    """
    A custom tokenizer designed for the Enhanced Business Model for Collaborative
    Predictive Supply Chain. It prioritizes industry-specific tokens from a
    `vocab.json` file and uses Byte-Pair Encoding (BPE) for out-of-vocabulary
    (OOV) words. It handles various data types expected in supply chain data.

    Args:
        vocab_path (str): Path to the `vocab.json` file.
        max_length (int, optional): Maximum sequence length. Defaults to 512.
    """

    def __init__(self, vocab_path: str, max_length: int = 512):
        if not os.path.exists(vocab_path):
            raise FileNotFoundError(f"Vocabulary file not found: {vocab_path}")

        self.vocab_path = vocab_path
        self.max_length = max_length

        # Load the industry-specific vocabulary (token -> ID mapping).
        with open(self.vocab_path, "r", encoding="utf-8") as f:
            self.vocab = json.load(f)

        # BPE model seeded with the custom vocabulary; merge rules are learned
        # later via `train_bpe`.
        self.bpe_model = models.BPE(
            vocab=self.vocab,
            merges=[],
            unk_token="[UNK]",
        )
        self.tokenizer = Tokenizer(self.bpe_model)

        # Normalization: Unicode decomposition, lowercasing, accent stripping.
        self.tokenizer.normalizer = normalizers.Sequence(
            [normalizers.NFD(), normalizers.Lowercase(), normalizers.StripAccents()]
        )

        # Pre-tokenization: split on whitespace and isolate individual digits.
        self.tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [WhitespaceSplit(), Digits(individual_digits=True)]
        )

        self.tokenizer.decoder = decoders.BPEDecoder()

        # Post-processing: wrap single sequences and pairs with [CLS]/[SEP].
        self.tokenizer.post_processor = processors.TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[
                ("[CLS]", self.vocab["[CLS]"]),
                ("[SEP]", self.vocab["[SEP]"]),
            ],
        )

        self.pad_token_id = self.vocab["[PAD]"]

    def train_bpe(self, files: Union[str, List[str]], vocab_size: int = 30000):
        """
        Trains the BPE model on text files, learning the merge rules that let
        the tokenizer handle words not present in the initial `vocab.json`.
        Note that training rebuilds the tokenizer's vocabulary from the
        training files (plus the special tokens), so domain-specific tokens
        should also appear in the training data if they are to be retained.

        Args:
            files (Union[str, List[str]]): Path(s) to text file(s) for training.
            vocab_size (int): The desired vocabulary size (including special
                tokens and the initial vocabulary).
        """
        if isinstance(files, str):
            files = [files]

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
            show_progress=True,
        )
        self.tokenizer.train(files, trainer=trainer)

    def encode(self, text: str, text_pair: str = None) -> List[str]:
        """
        Encodes text into a list of tokens.

        Args:
            text (str): The input text.
            text_pair (str, optional): An optional second input string.

        Returns:
            List[str]: A list of tokens.
        """
        encoded = self.tokenizer.encode(text, text_pair)
        return encoded.tokens

    def encode_as_ids(self, text: str, text_pair: str = None) -> List[int]:
        """
        Encodes text into a list of token IDs.

        Args:
            text (str): The input text.
            text_pair (str, optional): An optional second input string.

        Returns:
            List[int]: A list of token IDs.
        """
        encoded = self.tokenizer.encode(text, text_pair)
        return encoded.ids

    def decode(self, ids: List[int], skip_special_tokens: bool = True) -> str:
        """
        Decodes a list of token IDs back into a string.

        Args:
            ids (List[int]): The list of token IDs.
            skip_special_tokens (bool): Whether to skip special tokens in decoding.

        Returns:
            str: The decoded string.
        """
        return self.tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)

    def token_to_id(self, token: str) -> int:
        """
        Converts a token to its corresponding ID.

        Args:
            token (str): The token.

        Returns:
            int: The token ID. Returns the `[UNK]` token's ID if the token is
                not in the vocabulary.
        """
        return self.vocab.get(token, self.vocab.get("[UNK]"))

    def id_to_token(self, id_: int) -> str:
        """
        Converts a token ID to its corresponding token.

        Args:
            id_ (int): The token ID.

        Returns:
            str: The token. Returns "[UNK]" if the ID is not in the vocabulary.
        """
        reverse_vocab = {v: k for k, v in self.vocab.items()}
        return reverse_vocab.get(id_, "[UNK]")

    def get_vocab_size(self) -> int:
        """Gets the vocabulary size."""
        return len(self.vocab)

    def save(self, directory: str, prefix: str = None):
        """
        Saves the tokenizer configuration and vocabulary to a directory.

        Args:
            directory (str): The directory to save to.
            prefix (str, optional): An optional prefix for the filenames.
        """
        if not os.path.exists(directory):
            os.makedirs(directory)

        prefix_part = (prefix + "-") if prefix else ""
        self.tokenizer.save(os.path.join(directory, prefix_part + "tokenizer.json"))
        with open(os.path.join(directory, prefix_part + "vocab.json"), "w", encoding="utf-8") as f:
            json.dump(self.vocab, f, ensure_ascii=False, indent=4)

    @staticmethod
    def from_pretrained(directory: str, prefix: str = None):
        """
        Loads a pre-trained tokenizer from a directory.

        Args:
            directory (str): The directory to load from.
            prefix (str, optional): The optional prefix used when saving.

        Returns:
            SupplyChainTokenizer: The loaded tokenizer.
        """
        prefix_part = (prefix + "-") if prefix else ""
        vocab_path = os.path.join(directory, prefix_part + "vocab.json")

        tokenizer = SupplyChainTokenizer(vocab_path)
        tokenizer.tokenizer = Tokenizer.from_file(
            os.path.join(directory, prefix_part + "tokenizer.json")
        )
        return tokenizer

    def prepare_for_model(self, data: pd.DataFrame) -> Tuple[List[List[int]], List[List[int]]]:
        """
        Prepares a Pandas DataFrame for the Transformer model. This is the
        key method that integrates the tokenizer with the data.

        Args:
            data (pd.DataFrame): The input DataFrame, expected to have columns
                like 'timestamp', 'sku', 'store_id', 'quantity', 'price',
                'discount', 'promotion_id', etc. The exact columns depend on
                the features you're using.

        Returns:
            Tuple[List[List[int]], List[List[int]]]: A tuple of:
                1. input_ids: List of token ID sequences for the model.
                2. attention_mask: List of attention masks (1 for real tokens, 0 for padding).
        """
        input_ids = []
        attention_masks = []

        for _, row in data.iterrows():
            # Serialize the row's fields into a flat string; the [CLS]/[SEP]
            # special tokens are added by the tokenizer's post-processor.
            input_string = (
                f"timestamp: {row['timestamp']} "
                f"sku: {row['sku']} store_id: {row['store_id']} "
                f"quantity: {row['quantity']} price: {row['price']} "
                f"discount: {row['discount']} "
            )

            if 'promotion_id' in row and not pd.isna(row['promotion_id']):
                input_string += f"promotion_id: {row['promotion_id']} "

            if 'product_category' in row:
                input_string += f"product_category: {row['product_category']} "

            encoded = self.tokenizer.encode(input_string)
            token_ids = encoded.ids
            attention_mask = encoded.attention_mask

            # Pad or truncate to the fixed maximum sequence length.
            padding_length = self.max_length - len(token_ids)
            if padding_length > 0:
                token_ids += [self.pad_token_id] * padding_length
                attention_mask += [0] * padding_length
            elif padding_length < 0:
                token_ids = token_ids[:self.max_length]
                attention_mask = attention_mask[:self.max_length]

            input_ids.append(token_ids)
            attention_masks.append(attention_mask)

        return input_ids, attention_masks


if __name__ == "__main__": |
    vocab = {
        "[UNK]": 0,
        "[CLS]": 1,
        "[SEP]": 2,
        "[PAD]": 3,
        "[MASK]": 4,
        "timestamp:": 5,
        "sku:": 6,
        "store_id:": 7,
        "quantity:": 8,
        "price:": 9,
        "discount:": 10,
        "promotion_id:": 11,
        "product_category:": 12,
        "SKU123": 13,
        "SKU123-RED": 14,
        "SKU123-BLUE": 15,
        "STORE456": 16,
        "PLANT789": 17,
        "WHOLESALER001": 18,
        "RETAILER002": 19,
        "BOGO": 20,
        "DISCOUNT": 21,
    }
    with open("vocab.json", "w") as f:
        json.dump(vocab, f, indent=4)

    tokenizer = SupplyChainTokenizer(vocab_path="vocab.json")

    # Create a tiny training corpus and learn BPE merges from it.
    with open("training_data.txt", "w", encoding="utf-8") as f:
        f.write("This is some example text for training the BPE model.\n")
        f.write("SKU123 is a product. STORE456 is another. plant789 is, too.\n")
        f.write("This file contains words not in the initial vocabulary.\n")

    tokenizer.train_bpe("training_data.txt", vocab_size=50)

    text = "timestamp: 2024-07-03 sku: SKU123 store_id: STORE456 quantity: 2 price: 10.99 discount: 0.0"
    encoded_tokens = tokenizer.encode(text)
    encoded_ids = tokenizer.encode_as_ids(text)
    print(f"Encoded tokens: {encoded_tokens}")
    print(f"Encoded IDs: {encoded_ids}")

    decoded_text = tokenizer.decode(encoded_ids)
    print(f"Decoded text: {decoded_text}")

    data = {
        'timestamp': ['2024-07-03 10:00:00', '2024-07-03 11:00:00'],
        'sku': ['SKU123', 'SKU123-RED'],
        'store_id': ['STORE456', 'STORE456'],
        'quantity': [2, 1],
        'price': [10.99, 12.99],
        'discount': [0.0, 1.0],
        'promotion_id': ['BOGO', None],
        'product_category': ['Electronics', 'Electronics']
    }
    df = pd.DataFrame(data)
    input_ids, attention_masks = tokenizer.prepare_for_model(df)
    print(f"Input IDs (for model): {input_ids}")
    print(f"Attention Masks: {attention_masks}")

    tokenizer.save("my_tokenizer")
    loaded_tokenizer = SupplyChainTokenizer.from_pretrained("my_tokenizer")
    print(f"Loaded tokenizer vocab size: {loaded_tokenizer.get_vocab_size()}")

    # Clean up the temporary files created for this demonstration.
    os.remove("vocab.json")
    os.remove("training_data.txt")