import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from torchtext.datasets import AG_NEWS
from torchtext.vocab import build_vocab_from_iterator

# Stream the raw training split once to build the vocabulary. This pass
# exhausts the iterator, so both splits are re-created before the DataLoaders.
train_iter = AG_NEWS(split='train')

# 'basic_english' normalizes to lowercase and splits on whitespace/punctuation.
tokenizer = get_tokenizer('basic_english')

def yield_tokens(data_iter):
    # AG_NEWS yields (label, text) pairs; only the text feeds the vocabulary.
    for _, text in data_iter:
        yield tokenizer(text)

# '<unk>' takes index 0 (the default for out-of-vocabulary tokens) and '<pad>'
# takes index 1, so padding no longer aliases the unknown-word embedding.
vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>", "<pad>"])
vocab.set_default_index(vocab["<unk>"])
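
# Quick illustration of vocabulary lookups (the exact token ids depend on the
# corpus, so treat the printed values as examples, not fixed constants):
print(len(vocab))                              # vocabulary size
print(vocab(['here', 'is', 'an', 'example']))  # ids for in-vocabulary tokens
print(vocab(['xyzzy123']))                     # OOV token -> default index [0]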

# Convert a raw string into a list of token ids.
def text_pipeline(x):
    return vocab(tokenizer(x))

# AG_NEWS labels run 1..4; shift them to 0..3, the zero-based class indices
# expected by losses such as nn.CrossEntropyLoss.
def label_pipeline(x):
    return int(x) - 1
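
# Illustrative pipeline calls (the token ids are corpus-dependent):
print(text_pipeline("wall street closes higher"))  # e.g. a list of 4 ids
print(label_pipeline(3))                           # 2 -- class 3 (Business), zero-based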

# Batch collation: convert labels to a tensor, tensorize and pad the token id
# sequences, and record the original lengths (useful for packed RNN input).
def collate_batch(batch):
    label_list, text_list, lengths = [], [], []
    for _label, _text in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        lengths.append(len(processed_text))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    # pad_sequence defaults to a (max_seq_len, batch_size) layout; pad with the
    # dedicated '<pad>' index so padding stays distinct from '<unk>'.
    text_list = pad_sequence(text_list, padding_value=vocab["<pad>"])
    return label_list, text_list, lengths
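
# Why return `lengths`: a sketch of downstream use with packed sequences.
# (`embedding` and `lstm` are hypothetical layers, not defined in this script.)
#
#   from torch.nn.utils.rnn import pack_padded_sequence
#   embedded = embedding(text_batch)      # (max_seq_len, batch, emb_dim)
#   packed = pack_padded_sequence(embedded, lengths, enforce_sorted=False)
#   output, (h_n, c_n) = lstm(packed)     # the RNN skips padded positions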

# Re-create both splits (the training iterator was consumed while building the
# vocabulary) and wrap them in DataLoaders. list() materializes the streaming
# datasets so they can be shuffled; for large corpora, a map-style dataset
# would be preferable to holding everything in memory.
train_iter, test_iter = AG_NEWS(split=('train', 'test'))
train_dataloader = DataLoader(list(train_iter), batch_size=8, shuffle=True, collate_fn=collate_batch)
test_dataloader = DataLoader(list(test_iter), batch_size=8, shuffle=False, collate_fn=collate_batch)
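
# Sanity check (illustrative): pull one batch and inspect its shapes. The
# exact max_seq_len varies from batch to batch.
labels, texts, lengths = next(iter(train_dataloader))
print(labels.shape)   # torch.Size([8])              -- one label per example
print(texts.shape)    # torch.Size([max_seq_len, 8]) -- time-major padded batch
print(lengths)        # Python list of the 8 unpadded sequence lengths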