|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""PyTorch ZEN2 model classes.""" |
|
|
|
from __future__ import absolute_import, division, print_function, unicode_literals |
|
|
|
import copy |
|
import logging |
|
import math |
|
import os |
|
|
|
|
import torch |
|
from torch import nn |
|
from torch.nn import CrossEntropyLoss |
|
from dataclasses import dataclass
from typing import Optional, Tuple
|
from transformers import PreTrainedModel |
|
from transformers.utils import ModelOutput |
|
|
|
from fengshen.models.zen2.configuration_zen2 import ZenConfig |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
PRETRAINED_MODEL_ARCHIVE_MAP = { |
|
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", |
|
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", |
|
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", |
|
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", |
|
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", |
|
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", |
|
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", |
|
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin", |
|
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", |
|
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", |
|
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", |
|
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", |
|
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", |
|
'IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese/resolve/main/pytorch_model.bin', |
|
'IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese/resolve/main/pytorch_model.bin', |
|
} |
|
PRETRAINED_CONFIG_ARCHIVE_MAP = { |
|
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json", |
|
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json", |
|
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json", |
|
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json", |
|
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json", |
|
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json", |
|
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json", |
|
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json", |
|
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json", |
|
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json", |
|
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json", |
|
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json", |
|
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json", |
|
'IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese/resolve/main/config.json', |
|
'IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese/resolve/main/config.json', |
|
} |
|
BERT_CONFIG_NAME = 'bert_config.json' |
|
TF_WEIGHTS_NAME = 'model.ckpt' |
|
|
|
@dataclass |
|
class BertForPreTrainingOutput(ModelOutput): |
|
""" |
|
Output type of [`BertForPreTraining`]. |
|
|
|
Args: |
|
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
Total loss as the sum of the masked language modeling loss and the next sequence prediction |
|
(classification) loss. |
|
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): |
|
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
|
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): |
|
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation |
|
before SoftMax). |
|
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
|
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of |
|
shape `(batch_size, sequence_length, hidden_size)`. |
|
|
|
Hidden-states of the model at the output of each layer plus the initial embedding outputs. |
|
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
|
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
|
sequence_length)`. |
|
|
|
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention |
|
heads. |
|
""" |
|
|
|
loss: Optional[torch.FloatTensor] = None |
|
prediction_logits: torch.FloatTensor = None |
|
seq_relationship_logits: torch.FloatTensor = None |
|
hidden_states: Optional[Tuple[torch.FloatTensor]] = None |
|
attentions: Optional[Tuple[torch.FloatTensor]] = None |
|
|
|
@dataclass |
|
class MaskedLMOutput(ModelOutput): |
|
""" |
|
Base class for masked language models outputs. |
|
|
|
Args: |
|
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): |
|
Masked language modeling (MLM) loss. |
|
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): |
|
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
|
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
|
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + |
|
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. |
|
|
|
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
|
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
|
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
|
sequence_length)`. |
|
|
|
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention |
|
heads. |
|
""" |
|
|
|
loss: Optional[torch.FloatTensor] = None |
|
logits: torch.FloatTensor = None |
|
hidden_states: Optional[Tuple[torch.FloatTensor]] = None |
|
attentions: Optional[Tuple[torch.FloatTensor]] = None |
|
|
|
def prune_linear_layer(layer, index, dim=0): |
|
""" Prune a linear layer (a model parameters) to keep only entries in index. |
|
Return the pruned layer as a new layer with requires_grad=True. |
|
Used to remove heads. |
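Example (a minimal sketch; shapes are illustrative):
    layer = nn.Linear(128, 96)
    pruned = prune_linear_layer(layer, torch.arange(64), dim=0)  # keeps 64 output units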
|
""" |
|
index = index.to(layer.weight.device) |
|
W = layer.weight.index_select(dim, index).clone().detach() |
|
if layer.bias is not None: |
|
if dim == 1: |
|
b = layer.bias.clone().detach() |
|
else: |
|
b = layer.bias[index].clone().detach() |
|
new_size = list(layer.weight.size()) |
|
new_size[dim] = len(index) |
|
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) |
|
new_layer.weight.requires_grad = False |
|
new_layer.weight.copy_(W.contiguous()) |
|
new_layer.weight.requires_grad = True |
|
if layer.bias is not None: |
|
new_layer.bias.requires_grad = False |
|
new_layer.bias.copy_(b.contiguous()) |
|
new_layer.bias.requires_grad = True |
|
return new_layer |
|
|
|
|
|
def load_tf_weights_in_bert(model, tf_checkpoint_path): |
|
""" Load tf checkpoints in a pytorch model |
|
""" |
|
try: |
|
import re |
|
import numpy as np |
|
import tensorflow as tf |
|
except ImportError: |
|
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " |
|
"https://www.tensorflow.org/install/ for installation instructions.") |
|
raise |
|
tf_path = os.path.abspath(tf_checkpoint_path) |
|
print("Converting TensorFlow checkpoint from {}".format(tf_path)) |
|
|
|
init_vars = tf.train.list_variables(tf_path) |
|
names = [] |
|
arrays = [] |
|
for name, shape in init_vars: |
|
print("Loading TF weight {} with shape {}".format(name, shape)) |
|
array = tf.train.load_variable(tf_path, name) |
|
names.append(name) |
|
arrays.append(array) |
|
|
|
for name, array in zip(names, arrays): |
|
name = name.split('/') |
|
|
|
|
|
if any(n in ["adam_v", "adam_m", "global_step"] for n in name): |
|
print("Skipping {}".format("/".join(name))) |
|
continue |
|
pointer = model |
|
for m_name in name: |
|
if re.fullmatch(r'[A-Za-z]+_\d+', m_name): |
|
name_lists = re.split(r'_(\d+)', m_name) |
|
else: |
|
name_lists = [m_name] |
|
if name_lists[0] == 'kernel' or name_lists[0] == 'gamma': |
|
pointer = getattr(pointer, 'weight') |
|
elif name_lists[0] == 'output_bias' or name_lists[0] == 'beta': |
|
pointer = getattr(pointer, 'bias') |
|
elif name_lists[0] == 'output_weights': |
|
pointer = getattr(pointer, 'weight') |
|
elif name_lists[0] == 'squad': |
|
pointer = getattr(pointer, 'classifier') |
|
else: |
|
try: |
|
pointer = getattr(pointer, name_lists[0]) |
|
except AttributeError: |
|
print("Skipping {}".format("/".join(name))) |
|
continue |
|
if len(name_lists) >= 2: |
|
num = int(name_lists[1]) |
|
pointer = pointer[num] |
|
if m_name[-11:] == '_embeddings': |
|
pointer = getattr(pointer, 'weight') |
|
elif m_name == 'kernel': |
|
array = np.transpose(array) |
|
try: |
|
assert pointer.shape == array.shape |
|
except AssertionError as e: |
|
e.args += (pointer.shape, array.shape) |
|
raise |
|
print("Initialize PyTorch weight {}".format(name)) |
|
pointer.data = torch.from_numpy(array) |
|
return model |
|
|
|
|
|
def gelu(x): |
|
"""Implementation of the gelu activation function. |
|
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): |
|
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) |
|
Also see https://arxiv.org/abs/1606.08415 |
|
""" |
|
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) |
|
|
|
|
|
def swish(x): |
|
return x * torch.sigmoid(x) |
|
|
|
|
|
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} |
|
|
|
|
|
try: |
|
|
|
from torch.nn import LayerNorm as BertLayerNorm |
|
except ImportError: |
|
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") |
|
|
|
class BertLayerNorm(nn.Module): |
|
def __init__(self, hidden_size, eps=1e-12): |
|
"""Construct a layernorm module in the TF style (epsilon inside the square root). |
|
""" |
|
super(BertLayerNorm, self).__init__() |
|
self.weight = nn.Parameter(torch.ones(hidden_size)) |
|
self.bias = nn.Parameter(torch.zeros(hidden_size)) |
|
self.variance_epsilon = eps |
|
|
|
def forward(self, x): |
|
u = x.mean(-1, keepdim=True) |
|
s = (x - u).pow(2).mean(-1, keepdim=True) |
|
x = (x - u) / torch.sqrt(s + self.variance_epsilon) |
|
return self.weight * x + self.bias |
|
|
|
|
|
class BertEmbeddings(nn.Module): |
|
"""Construct the embeddings from word, position and token_type embeddings. |
|
""" |
|
|
|
def __init__(self, config): |
|
super(BertEmbeddings, self).__init__() |
|
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) |
|
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) |
|
|
|
|
|
|
|
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
def forward(self, input_ids, token_type_ids=None): |
|
if token_type_ids is None: |
|
token_type_ids = torch.zeros_like(input_ids) |
|
|
|
words_embeddings = self.word_embeddings(input_ids) |
|
token_type_embeddings = self.token_type_embeddings(token_type_ids) |
|
|
|
embeddings = words_embeddings + token_type_embeddings |
|
embeddings = self.LayerNorm(embeddings) |
|
embeddings = self.dropout(embeddings) |
|
return embeddings |
|
|
|
|
|
class BertWordEmbeddings(nn.Module): |
|
"""Construct the embeddings from ngram, position and token_type embeddings. |
|
""" |
|
|
|
def __init__(self, config): |
|
super(BertWordEmbeddings, self).__init__() |
|
self.word_embeddings = nn.Embedding(config.word_size, config.hidden_size, padding_idx=0) |
|
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) |
|
|
|
|
|
|
|
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
def forward(self, input_ids, token_type_ids=None): |
|
if token_type_ids is None: |
|
token_type_ids = torch.zeros_like(input_ids) |
|
|
|
words_embeddings = self.word_embeddings(input_ids) |
|
token_type_embeddings = self.token_type_embeddings(token_type_ids) |
|
|
|
embeddings = words_embeddings + token_type_embeddings |
|
embeddings = self.LayerNorm(embeddings) |
|
embeddings = self.dropout(embeddings) |
|
return embeddings |
|
|
|
|
|
class RelativeSinusoidalPositionalEmbedding(nn.Module): |
|
"""This module produces sinusoidal positional embeddings of any length. |
|
Padding symbols are ignored. |
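Example (a minimal sketch; the input is the extended attention mask):
    pos_emb = RelativeSinusoidalPositionalEmbedding(embedding_dim=64, padding_idx=0)
    mask = torch.zeros(2, 1, 1, 10)   # [bsz, 1, 1, seq_len]
    embed = pos_emb(mask)             # [2 * seq_len, embedding_dim]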
|
""" |
|
|
|
def __init__(self, embedding_dim, padding_idx, init_size=1568): |
|
""" |
|
|
|
:param embedding_dim: 每个位置的dimension |
|
:param padding_idx: |
|
:param init_size: |
|
""" |
|
super().__init__() |
|
self.embedding_dim = embedding_dim |
|
self.padding_idx = padding_idx |
|
assert init_size % 2 == 0 |
|
weights = self.get_embedding( |
|
init_size+1, |
|
embedding_dim, |
|
padding_idx, |
|
) |
|
self.register_buffer('weights', weights) |
|
self.register_buffer('_float_tensor', torch.FloatTensor(1)) |
|
|
|
def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None): |
|
"""Build sinusoidal embeddings. |
|
This matches the implementation in tensor2tensor, but differs slightly |
|
from the description in Section 3.5 of "Attention Is All You Need". |
|
""" |
|
half_dim = embedding_dim // 2 |
|
emb = math.log(10000) / (half_dim - 1) |
|
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) |
|
emb = torch.arange(-num_embeddings//2, num_embeddings//2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) |
|
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) |
|
if embedding_dim % 2 == 1: |
|
|
|
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) |
|
if padding_idx is not None: |
|
emb[padding_idx, :] = 0 |
|
self.origin_shift = num_embeddings//2 + 1 |
|
return emb |
|
|
|
def forward(self, input): |
|
"""Input is expected to be of size [bsz x seqlen]. |
|
""" |
|
bsz, _, _, seq_len = input.size() |
|
max_pos = self.padding_idx + seq_len |
|
if max_pos > self.origin_shift: |
|
|
|
weights = self.get_embedding( |
|
max_pos*2, |
|
self.embedding_dim, |
|
self.padding_idx, |
|
) |
|
weights = weights.to(self._float_tensor) |
|
del self.weights |
|
self.origin_shift = weights.size(0)//2 |
|
self.register_buffer('weights', weights) |
|
|
|
positions = torch.arange(-seq_len, seq_len).to(input.device).long() + self.origin_shift |
|
embed = self.weights.index_select(0, positions.long()).detach() |
|
return embed |
|
|
|
|
|
class BertSelfAttention(nn.Module): |
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(BertSelfAttention, self).__init__() |
|
if config.hidden_size % config.num_attention_heads != 0: |
|
raise ValueError( |
|
"The hidden size (%d) is not a multiple of the number of attention " |
|
"heads (%d)" % (config.hidden_size, config.num_attention_heads)) |
|
self.output_attentions = output_attentions |
|
self.keep_multihead_output = keep_multihead_output |
|
self.multihead_output = None |
|
|
|
self.num_attention_heads = config.num_attention_heads |
|
self.attention_head_size = int(config.hidden_size / config.num_attention_heads) |
|
self.all_head_size = self.num_attention_heads * self.attention_head_size |
|
|
|
self.query = nn.Linear(config.hidden_size, self.all_head_size) |
|
self.key = nn.Linear(config.hidden_size, self.all_head_size) |
|
self.value = nn.Linear(config.hidden_size, self.all_head_size) |
|
|
|
self.dropout = nn.Dropout(config.attention_probs_dropout_prob) |
|
self.softmax = nn.Softmax(dim=-1) |
|
|
|
self.position_embedding = RelativeSinusoidalPositionalEmbedding(self.attention_head_size, 0, 1200) |
|
self.r_r_bias = nn.Parameter( |
|
nn.init.xavier_normal_(torch.zeros(self.num_attention_heads, self.attention_head_size))) |
|
self.r_w_bias = nn.Parameter( |
|
nn.init.xavier_normal_(torch.zeros(self.num_attention_heads, self.attention_head_size))) |
|
|
|
def transpose_for_scores(self, x): |
|
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) |
|
x = x.view(*new_x_shape) |
|
return x.permute(0, 2, 1, 3) |
|
|
|
def forward(self, hidden_states, attention_mask, head_mask=None): |
|
position_embedding = self.position_embedding(attention_mask) |
|
|
|
mixed_query_layer = self.query(hidden_states) |
|
mixed_key_layer = self.key(hidden_states) |
|
mixed_value_layer = self.value(hidden_states) |
|
|
|
query_layer = self.transpose_for_scores(mixed_query_layer) |
|
key_layer = self.transpose_for_scores(mixed_key_layer) |
|
value_layer = self.transpose_for_scores(mixed_value_layer) |
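# Transformer-XL style relative attention: AC is the content-content term
# (query plus the learned r_r_bias, scored against keys); B_ and D_ form the
# content-position term against relative sinusoidal embeddings, which _shift
# realigns so that each query row sees its own relative offsets.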
|
|
|
rw_head_q = query_layer + self.r_r_bias[:, None] |
|
AC = torch.einsum('bnqd,bnkd->bnqk', [rw_head_q.float(), key_layer.float()]) |
|
|
|
D_ = torch.einsum('nd,ld->nl', self.r_w_bias.float(), position_embedding.float())[None, :, None]
B_ = torch.einsum('bnqd,ld->bnql', query_layer.float(), position_embedding.float())
|
BD = B_ + D_ |
|
BD = self._shift(BD) |
|
attention_scores = AC + BD |
|
attention_scores = attention_scores / math.sqrt(self.attention_head_size) |
|
|
|
attention_scores = attention_scores + attention_mask |
|
|
|
|
|
attention_probs = self.softmax(attention_scores) |
|
|
|
|
|
|
|
attention_probs = self.dropout(attention_probs) |
|
|
|
|
|
if head_mask is not None: |
|
attention_probs = attention_probs * head_mask |
|
|
|
context_layer = torch.matmul(attention_probs.type_as(value_layer), value_layer) |
|
if self.keep_multihead_output: |
|
self.multihead_output = context_layer |
|
self.multihead_output.retain_grad() |
|
context_layer = context_layer.permute(0, 2, 1, 3).contiguous() |
|
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) |
|
context_layer = context_layer.view(*new_context_layer_shape) |
|
if self.output_attentions: |
|
return attention_probs, context_layer |
|
return context_layer |
|
|
|
def _shift(self, BD): |
|
""" |
|
类似 |
|
-3 -2 -1 0 1 2 |
|
-3 -2 -1 0 1 2 |
|
-3 -2 -1 0 1 2 |
|
|
|
转换为 |
|
0 1 2 |
|
-1 0 1 |
|
-2 -1 0 |
|
|
|
:param BD: batch_size x n_head x max_len x 2max_len |
|
:return: batch_size x n_head x max_len x max_len |
|
""" |
|
bsz, n_head, max_len, _ = BD.size() |
|
zero_pad = BD.new_zeros(bsz, n_head, max_len, 1) |
|
BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) |
|
BD = BD[:, :, :-1].view(bsz, n_head, max_len, -1) |
|
BD = BD[:, :, :, max_len:] |
|
return BD |
|
|
|
|
|
class BertSelfOutput(nn.Module): |
|
def __init__(self, config): |
|
super(BertSelfOutput, self).__init__() |
|
self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
|
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
def forward(self, hidden_states, input_tensor): |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.dropout(hidden_states) |
|
hidden_states = self.LayerNorm(hidden_states + input_tensor) |
|
return hidden_states |
|
|
|
|
|
class BertAttention(nn.Module): |
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(BertAttention, self).__init__() |
|
self.output_attentions = output_attentions |
|
self.self = BertSelfAttention(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.output = BertSelfOutput(config) |
|
|
|
def prune_heads(self, heads): |
|
if len(heads) == 0: |
|
return |
|
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) |
|
for head in heads: |
|
mask[head] = 0 |
|
mask = mask.view(-1).contiguous().eq(1) |
|
index = torch.arange(len(mask))[mask].long() |
|
|
|
self.self.query = prune_linear_layer(self.self.query, index) |
|
self.self.key = prune_linear_layer(self.self.key, index) |
|
self.self.value = prune_linear_layer(self.self.value, index) |
|
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) |
|
|
|
self.self.num_attention_heads = self.self.num_attention_heads - len(heads) |
|
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads |
|
|
|
def forward(self, input_tensor, attention_mask, head_mask=None): |
|
self_output = self.self(input_tensor, attention_mask, head_mask) |
|
if self.output_attentions: |
|
attentions, self_output = self_output |
|
attention_output = self.output(self_output, input_tensor) |
|
if self.output_attentions: |
|
return attentions, attention_output |
|
return attention_output |
|
|
|
|
|
class BertIntermediate(nn.Module): |
|
def __init__(self, config): |
|
super(BertIntermediate, self).__init__() |
|
self.dense = nn.Linear(config.hidden_size, config.intermediate_size) |
|
|
|
if isinstance(config.hidden_act, str): |
|
self.intermediate_act_fn = ACT2FN[config.hidden_act] |
|
else: |
|
self.intermediate_act_fn = config.hidden_act |
|
|
|
def forward(self, hidden_states): |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.intermediate_act_fn(hidden_states) |
|
return hidden_states |
|
|
|
|
|
class BertOutput(nn.Module): |
|
def __init__(self, config): |
|
super(BertOutput, self).__init__() |
|
self.dense = nn.Linear(config.intermediate_size, config.hidden_size) |
|
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
def forward(self, hidden_states, input_tensor): |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.dropout(hidden_states) |
|
hidden_states = self.LayerNorm(hidden_states + input_tensor) |
|
return hidden_states |
|
|
|
|
|
class BertLayer(nn.Module): |
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(BertLayer, self).__init__() |
|
self.output_attentions = output_attentions |
|
self.attention = BertAttention(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.intermediate = BertIntermediate(config) |
|
self.output = BertOutput(config) |
|
|
|
def forward(self, hidden_states, attention_mask, head_mask=None): |
|
attention_output = self.attention(hidden_states, attention_mask, head_mask) |
|
if self.output_attentions: |
|
attentions, attention_output = attention_output |
|
intermediate_output = self.intermediate(attention_output) |
|
layer_output = self.output(intermediate_output, attention_output) |
|
if self.output_attentions: |
|
return attentions, layer_output |
|
return layer_output |
|
|
|
|
|
class ZenEncoder(nn.Module): |
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(ZenEncoder, self).__init__() |
|
self.output_attentions = output_attentions |
|
layer = BertLayer(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) |
|
self.word_layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_word_layers)]) |
|
self.num_hidden_word_layers = config.num_hidden_word_layers |
|
|
|
def forward(self, hidden_states, ngram_hidden_states, ngram_position_matrix, attention_mask, |
|
ngram_attention_mask, |
|
output_all_encoded_layers=True, head_mask=None): |
|
|
|
all_encoder_layers = [] |
|
all_attentions = [] |
|
num_hidden_ngram_layers = self.num_hidden_word_layers |
|
for i, layer_module in enumerate(self.layer): |
|
hidden_states = layer_module(hidden_states, attention_mask, head_mask[i]) |
|
if i < num_hidden_ngram_layers: |
|
ngram_hidden_states = self.word_layers[i](ngram_hidden_states, ngram_attention_mask, head_mask[i]) |
|
if self.output_attentions: |
|
ngram_attentions, ngram_hidden_states = ngram_hidden_states |
|
all_attentions.append(ngram_attentions) |
|
if self.output_attentions: |
|
attentions, hidden_states = hidden_states |
|
all_attentions.append(attentions) |
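# Fuse the n-gram stream back into the character stream: ngram_position_matrix
# has shape [batch, seq_len, num_ngrams] and scatters each n-gram's hidden
# state onto every character position it covers.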
|
hidden_states += torch.bmm(ngram_position_matrix.float(), ngram_hidden_states.float()) |
|
if output_all_encoded_layers: |
|
all_encoder_layers.append(hidden_states) |
|
if not output_all_encoded_layers: |
|
all_encoder_layers.append(hidden_states) |
|
if self.output_attentions: |
|
return all_attentions, all_encoder_layers |
|
return all_encoder_layers |
|
|
|
|
|
class BertPooler(nn.Module): |
|
def __init__(self, config): |
|
super(BertPooler, self).__init__() |
|
self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
|
self.activation = nn.Tanh() |
|
|
|
def forward(self, hidden_states): |
|
|
|
|
|
first_token_tensor = hidden_states[:, 0] |
|
pooled_output = self.dense(first_token_tensor) |
|
pooled_output = self.activation(pooled_output) |
|
return pooled_output |
|
|
|
|
|
class BertPredictionHeadTransform(nn.Module): |
|
def __init__(self, config): |
|
super(BertPredictionHeadTransform, self).__init__() |
|
self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
|
|
|
if isinstance(config.hidden_act, str): |
|
self.transform_act_fn = ACT2FN[config.hidden_act] |
|
else: |
|
self.transform_act_fn = config.hidden_act |
|
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
|
def forward(self, hidden_states): |
|
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.transform_act_fn(hidden_states) |
|
hidden_states = self.LayerNorm(hidden_states) |
|
return hidden_states |
|
|
|
|
|
class BertLMPredictionHead(nn.Module): |
|
def __init__(self, config, bert_model_embedding_weights): |
|
super(BertLMPredictionHead, self).__init__() |
|
self.transform = BertPredictionHeadTransform(config) |
|
|
|
|
|
|
|
self.decoder = nn.Linear(bert_model_embedding_weights.size(1), |
|
bert_model_embedding_weights.size(0), |
|
bias=False) |
|
self.decoder.weight = bert_model_embedding_weights |
|
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) |
|
|
|
def forward(self, hidden_states): |
|
hidden_states = self.transform(hidden_states) |
|
hidden_states = self.decoder(hidden_states) + self.bias |
|
return hidden_states |
|
|
|
|
|
class ZenOnlyMLMHead(nn.Module): |
|
def __init__(self, config, bert_model_embedding_weights): |
|
super(ZenOnlyMLMHead, self).__init__() |
|
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) |
|
|
|
def forward(self, sequence_output): |
|
prediction_scores = self.predictions(sequence_output) |
|
return prediction_scores |
|
|
|
|
|
class ZenOnlyNSPHead(nn.Module): |
|
def __init__(self, config): |
|
super(ZenOnlyNSPHead, self).__init__() |
|
self.seq_relationship = nn.Linear(config.hidden_size, 2) |
|
|
|
def forward(self, pooled_output): |
|
seq_relationship_score = self.seq_relationship(pooled_output) |
|
return seq_relationship_score |
|
|
|
|
|
class ZenPreTrainingHeads(nn.Module): |
|
def __init__(self, config, bert_model_embedding_weights): |
|
super(ZenPreTrainingHeads, self).__init__() |
|
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) |
|
self.seq_relationship = nn.Linear(config.hidden_size, 2) |
|
|
|
def forward(self, sequence_output, pooled_output): |
|
prediction_scores = self.predictions(sequence_output) |
|
seq_relationship_score = self.seq_relationship(pooled_output) |
|
return prediction_scores, seq_relationship_score |
|
|
|
|
|
class ZenPreTrainedModel(PreTrainedModel): |
|
""" An abstract class to handle weights initialization and |
|
a simple interface for downloading and loading pretrained models.
|
""" |
|
config_class = ZenConfig |
|
supports_gradient_checkpointing = True |
|
_keys_to_ignore_on_load_missing = [r"position_ids"] |
|
|
|
def _init_weights(self, module): |
|
"""Initialize the weights""" |
|
if isinstance(module, nn.Linear): |
|
|
|
|
|
module.weight.data.normal_( |
|
mean=0.0, std=self.config.initializer_range) |
|
if module.bias is not None: |
|
module.bias.data.zero_() |
|
elif isinstance(module, nn.Embedding): |
|
module.weight.data.normal_( |
|
mean=0.0, std=self.config.initializer_range) |
|
if module.padding_idx is not None: |
|
module.weight.data[module.padding_idx].zero_() |
|
elif isinstance(module, nn.LayerNorm): |
|
module.bias.data.zero_() |
|
module.weight.data.fill_(1.0) |
|
|
|
|
|
class ZenModel(ZenPreTrainedModel): |
|
"""ZEN model ("BERT-based Chinese (Z) text encoder Enhanced by N-gram representations"). |
|
|
|
Params: |
|
`config`: a BertConfig class instance with the configuration to build a new model |
|
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False |
|
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. |
|
This can be used to compute head importance metrics. Default: False |
|
|
|
Inputs: |
|
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] |
|
with the word token indices in the vocabulary |
|
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token |
|
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to |
|
a `sentence B` token (see BERT paper for more details). |
|
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices |
|
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max |
|
input sequence length in the current batch. It's the mask that we typically use for attention when |
|
a batch has varying length sentences. |
|
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. |
|
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. |
|
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
|
`input_ngram_ids`: input_ids of ngrams. |
|
`ngram_token_type_ids`: token_type_ids of ngrams. |
|
`ngram_attention_mask`: attention_mask of ngrams. |
|
`ngram_position_matrix`: position matrix of ngrams. |
|
|
|
|
|
Outputs: Tuple of (encoded_layers, pooled_output) |
|
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
|
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end |
|
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each |
|
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], |
|
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding |
|
to the last attention block of shape [batch_size, sequence_length, hidden_size], |
|
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a |
|
classifier pretrained on top of the hidden state associated to the first character of the |
|
input (`CLS`) to train on the Next-Sentence task (see BERT's paper). |
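Example usage (a minimal sketch; values are illustrative and `config` is
assumed to be a ZenConfig instance):
```python
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ngram_ids = torch.LongTensor([[2, 7], [4, 0]])
# [batch, seq_len, num_ngrams]: which characters each n-gram covers
ngram_position_matrix = torch.zeros(2, 3, 2)

model = ZenModel(config)
encoded_layers, pooled_output = model(input_ids, input_ngram_ids, ngram_position_matrix)
```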
|
|
|
""" |
|
|
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(ZenModel, self).__init__(config) |
|
self.output_attentions = output_attentions |
|
self.embeddings = BertEmbeddings(config) |
|
self.word_embeddings = BertWordEmbeddings(config) |
|
self.encoder = ZenEncoder(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.pooler = BertPooler(config) |
|
self.init_weights() |
|
|
|
def prune_heads(self, heads_to_prune): |
|
""" Prunes heads of the model. |
|
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} |
|
""" |
|
for layer, heads in heads_to_prune.items(): |
|
self.encoder.layer[layer].attention.prune_heads(heads) |
|
|
|
def get_multihead_outputs(self): |
|
""" Gather all multi-head outputs. |
|
Return: list (layers) of multihead module outputs with gradients |
|
""" |
|
return [layer.attention.self.multihead_output for layer in self.encoder.layer] |
|
|
|
def forward(self, input_ids, |
|
input_ngram_ids, |
|
ngram_position_matrix, |
|
token_type_ids=None, |
|
ngram_token_type_ids=None, |
|
attention_mask=None, |
|
ngram_attention_mask=None, |
|
output_all_encoded_layers=True, |
|
head_mask=None): |
|
if attention_mask is None: |
|
attention_mask = torch.ones_like(input_ids) |
|
if token_type_ids is None: |
|
token_type_ids = torch.zeros_like(input_ids) |
|
|
|
if ngram_attention_mask is None: |
|
ngram_attention_mask = torch.ones_like(input_ngram_ids) |
|
if ngram_token_type_ids is None: |
|
ngram_token_type_ids = torch.zeros_like(input_ngram_ids) |
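# Build extended masks that broadcast over attention scores: [batch, seq_len]
# -> [batch, 1, 1, seq_len], then map 1 -> 0.0 (keep) and 0 -> -10000.0
# (mask) so the result can simply be added to the raw attention scores.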
|
|
|
|
|
|
|
|
|
|
|
|
|
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) |
|
extended_ngram_attention_mask = ngram_attention_mask.unsqueeze(1).unsqueeze(2) |
|
|
|
|
|
|
|
|
|
|
|
|
|
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) |
|
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 |
|
|
|
extended_ngram_attention_mask = extended_ngram_attention_mask.to(dtype=next(self.parameters()).dtype) |
|
extended_ngram_attention_mask = (1.0 - extended_ngram_attention_mask) * -10000.0 |
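# Prepare the head mask if given: expand [num_heads] or [num_layers, num_heads]
# to 5D [num_layers, batch, num_heads, seq_len, seq_len] (1.0 keeps a head,
# 0.0 nullifies it); otherwise use one None per layer.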
|
|
|
|
|
|
|
|
|
|
|
|
|
if head_mask is not None: |
|
if head_mask.dim() == 1: |
|
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) |
|
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
|
elif head_mask.dim() == 2: |
|
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
|
else: |
|
head_mask = [None] * self.config.num_hidden_layers |
|
|
|
embedding_output = self.embeddings(input_ids, token_type_ids) |
|
ngram_embedding_output = self.word_embeddings(input_ngram_ids, ngram_token_type_ids) |
|
|
|
encoded_layers = self.encoder(embedding_output, |
|
ngram_embedding_output, |
|
ngram_position_matrix, |
|
extended_attention_mask, |
|
extended_ngram_attention_mask, |
|
output_all_encoded_layers=output_all_encoded_layers, |
|
head_mask=head_mask) |
|
if self.output_attentions: |
|
all_attentions, encoded_layers = encoded_layers |
|
sequence_output = encoded_layers[-1] |
|
pooled_output = self.pooler(sequence_output) |
|
if not output_all_encoded_layers: |
|
encoded_layers = encoded_layers[-1] |
|
if self.output_attentions: |
|
return all_attentions, encoded_layers, pooled_output |
|
return encoded_layers, pooled_output |
|
|
|
|
|
class ZenForPreTraining(ZenPreTrainedModel): |
|
"""ZEN model with pre-training heads. |
|
This module comprises the ZEN model followed by the two pre-training heads: |
|
- the masked language modeling head, and |
|
- the next sentence classification head. |
|
|
|
Params: |
|
`config`: a BertConfig class instance with the configuration to build a new model |
|
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False |
|
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. |
|
This can be used to compute head importance metrics. Default: False |
|
|
|
Inputs: |
|
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] |
|
with the word token indices in the vocabulary |
|
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token |
|
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to |
|
a `sentence B` token (see BERT paper for more details). |
|
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices |
|
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max |
|
input sequence length in the current batch. It's the mask that we typically use for attention when |
|
a batch has varying length sentences. |
|
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] |
|
with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size - 1]
|
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] |
|
with indices selected in [0, 1]. |
|
0 => next sentence is the continuation, 1 => next sentence is a random sentence. |
|
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. |
|
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
|
`input_ngram_ids`: input_ids of ngrams. |
|
`ngram_token_type_ids`: token_type_ids of ngrams. |
|
`ngram_attention_mask`: attention_mask of ngrams. |
|
`ngram_position_matrix`: position matrix of ngrams. |
|
|
|
Outputs: |
|
if `masked_lm_labels` and `next_sentence_label` are not `None`:
    Outputs a `BertForPreTrainingOutput` whose `loss` is the sum of the masked language
    modeling loss and the next sentence classification loss.
|
if `masked_lm_labels` or `next_sentence_label` is `None`: |
|
Outputs a tuple comprising |
|
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and |
|
- the next sentence classification logits of shape [batch_size, 2]. |
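Example usage (a minimal sketch; `config` and the input tensors are assumed
to be prepared as for `ZenModel`):
```python
model = ZenForPreTraining(config)
# Without labels the model returns the two heads' logits.
prediction_scores, seq_relationship_score = model(
    input_ids, input_ngram_ids, ngram_position_matrix)
```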
|
|
|
""" |
|
|
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(ZenForPreTraining, self).__init__(config) |
|
self.output_attentions = output_attentions |
|
self.bert = ZenModel(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.cls = ZenPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) |
|
self.init_weights() |
|
|
|
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, |
|
ngram_token_type_ids=None, |
|
attention_mask=None, |
|
ngram_attention_mask=None, |
|
masked_lm_labels=None, |
|
next_sentence_label=None, head_mask=None): |
|
|
|
outputs = self.bert(input_ids, |
|
input_ngram_ids, |
|
ngram_position_matrix, |
|
token_type_ids, |
|
ngram_token_type_ids, |
|
attention_mask, |
|
ngram_attention_mask, |
|
output_all_encoded_layers=False, head_mask=head_mask) |
|
|
|
if self.output_attentions: |
|
all_attentions, sequence_output, pooled_output = outputs |
|
else: |
|
sequence_output, pooled_output = outputs |
|
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) |
|
|
|
if masked_lm_labels is not None and next_sentence_label is not None: |
|
loss_fct = CrossEntropyLoss(ignore_index=-1)  # labels set to -1 are ignored, as documented above
|
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) |
|
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) |
|
total_loss = masked_lm_loss + next_sentence_loss |
|
|
|
return BertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores,
                                seq_relationship_logits=seq_relationship_score)
|
elif self.output_attentions: |
|
return all_attentions, prediction_scores, seq_relationship_score |
|
return prediction_scores, seq_relationship_score |
|
|
|
|
|
class ZenForMaskedLM(ZenPreTrainedModel): |
|
"""ZEN model with the masked language modeling head. |
|
This module comprises the ZEN model followed by the masked language modeling head. |
|
|
|
Params: |
|
`config`: a BertConfig class instance with the configuration to build a new model |
|
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False |
|
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. |
|
This can be used to compute head importance metrics. Default: False |
|
|
|
Inputs: |
|
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] |
|
with the word token indices in the vocabulary |
|
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token |
|
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to |
|
a `sentence B` token (see BERT paper for more details). |
|
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices |
|
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max |
|
input sequence length in the current batch. It's the mask that we typically use for attention when |
|
a batch has varying length sentences. |
|
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] |
|
with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size - 1]
|
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
    It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
|
`input_ngram_ids`: input_ids of ngrams. |
|
`ngram_token_type_ids`: token_type_ids of ngrams. |
|
`ngram_attention_mask`: attention_mask of ngrams. |
|
`ngram_position_matrix`: position matrix of ngrams. |
|
|
|
Outputs: |
|
if `masked_lm_labels` is not `None`:
    Outputs a `MaskedLMOutput` carrying the masked language modeling loss and logits.
if `masked_lm_labels` is `None`:
    Outputs a `MaskedLMOutput` whose logits have shape [batch_size, sequence_length, vocab_size].
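Example usage (a minimal sketch; `config` and the input tensors are assumed
to be prepared as for `ZenModel`):
```python
model = ZenForMaskedLM(config)
output = model(input_ids, input_ngram_ids, ngram_position_matrix,
               masked_lm_labels=masked_lm_labels)
loss, logits = output.loss, output.logits
```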
|
|
|
""" |
|
|
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(ZenForMaskedLM, self).__init__(config) |
|
self.output_attentions = output_attentions |
|
self.bert = ZenModel(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.cls = ZenOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) |
|
self.init_weights() |
|
|
|
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, ngram_attention_mask=None, masked_lm_labels=None, head_mask=None): |
|
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids, None, attention_mask, ngram_attention_mask, |
|
output_all_encoded_layers=False, |
|
head_mask=head_mask) |
|
if self.output_attentions: |
|
all_attentions, sequence_output, _ = outputs |
|
else: |
|
sequence_output, _ = outputs |
|
prediction_scores = self.cls(sequence_output) |
|
|
|
if masked_lm_labels is not None: |
|
loss_fct = CrossEntropyLoss(ignore_index=-1) |
|
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) |
|
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores)
|
elif self.output_attentions: |
|
return all_attentions, prediction_scores |
|
return MaskedLMOutput(logits=prediction_scores)  # no labels given, so no loss
|
|
|
|
|
class ZenForNextSentencePrediction(ZenPreTrainedModel): |
|
"""ZEN model with next sentence prediction head. |
|
This module comprises the ZEN model followed by the next sentence classification head. |
|
|
|
Params: |
|
`config`: a BertConfig class instance with the configuration to build a new model |
|
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False |
|
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. |
|
This can be used to compute head importance metrics. Default: False |
|
|
|
Inputs: |
|
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] |
|
with the word token indices in the vocabulary |
|
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token |
|
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to |
|
a `sentence B` token (see BERT paper for more details). |
|
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices |
|
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max |
|
input sequence length in the current batch. It's the mask that we typically use for attention when |
|
a batch has varying length sentences. |
|
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] |
|
with indices selected in [0, 1]. |
|
0 => next sentence is the continuation, 1 => next sentence is a random sentence. |
|
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. |
|
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
|
`input_ngram_ids`: input_ids of ngrams. |
|
`ngram_token_type_ids`: token_type_ids of ngrams. |
|
`ngram_attention_mask`: attention_mask of ngrams. |
|
`ngram_position_matrix`: position matrix of ngrams. |
|
|
|
Outputs: |
|
if `next_sentence_label` is not `None`: |
|
Outputs the next sentence classification loss.
|
if `next_sentence_label` is `None`: |
|
Outputs the next sentence classification logits of shape [batch_size, 2]. |
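Example usage (a minimal sketch; inputs as for `ZenModel`):
```python
model = ZenForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, input_ngram_ids, ngram_position_matrix)
```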
|
|
|
""" |
|
|
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(ZenForNextSentencePrediction, self).__init__(config) |
|
self.output_attentions = output_attentions |
|
self.bert = ZenModel(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.cls = ZenOnlyNSPHead(config) |
|
self.init_weights() |
|
|
|
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None): |
|
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids,
                    attention_mask=attention_mask,
|
output_all_encoded_layers=False, |
|
head_mask=head_mask) |
|
if self.output_attentions: |
|
all_attentions, _, pooled_output = outputs |
|
else: |
|
_, pooled_output = outputs |
|
seq_relationship_score = self.cls(pooled_output) |
|
|
|
if next_sentence_label is not None: |
|
loss_fct = CrossEntropyLoss(ignore_index=-1) |
|
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) |
|
return next_sentence_loss |
|
elif self.output_attentions: |
|
return all_attentions, seq_relationship_score |
|
return seq_relationship_score |
|
|
|
|
|
class ZenForSequenceClassification(ZenPreTrainedModel): |
|
"""ZEN model for classification. |
|
This module is composed of the ZEN model with a linear layer on top of |
|
the pooled output. |
|
|
|
Params: |
|
`config`: a BertConfig class instance with the configuration to build a new model |
|
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False |
|
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. |
|
This can be used to compute head importance metrics. Default: False |
|
`num_labels`: the number of classes for the classifier (the model actually reads `config.num_labels`; this argument is kept for backward compatibility).
|
|
|
Inputs: |
|
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] |
|
with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts |
|
`extract_features.py`, `run_classifier.py` and `run_squad.py`) |
|
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token |
|
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to |
|
a `sentence B` token (see BERT paper for more details). |
|
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices |
|
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max |
|
input sequence length in the current batch. It's the mask that we typically use for attention when |
|
a batch has varying length sentences. |
|
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size] |
|
with indices selected in [0, ..., num_labels - 1].
|
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. |
|
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
|
`input_ngram_ids`: input_ids of ngrams. |
|
`ngram_token_type_ids`: token_type_ids of ngrams. |
|
`ngram_attention_mask`: attention_mask of ngrams. |
|
`ngram_position_matrix`: position matrix of ngrams. |
|
|
|
Outputs: |
|
if `labels` is not `None`:
    Outputs a (loss, logits) tuple with the CrossEntropy classification loss.
if `labels` is `None`:
    Outputs a (None, logits) tuple; logits has shape [batch_size, num_labels].
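Example usage (a minimal sketch; `config.num_labels` is assumed to be set):
```python
model = ZenForSequenceClassification(config)
loss, logits = model(input_ids, input_ngram_ids, ngram_position_matrix, labels=labels)
```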
|
|
|
""" |
|
|
|
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False): |
|
super(ZenForSequenceClassification, self).__init__(config) |
|
self.output_attentions = output_attentions |
|
self.num_labels = config.num_labels |
|
self.bert = ZenModel(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
self.classifier = nn.Linear(config.hidden_size, self.num_labels) |
|
self.init_weights() |
|
|
|
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, labels=None, head_mask=None): |
|
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids, |
|
attention_mask=attention_mask, |
|
output_all_encoded_layers=False, |
|
head_mask=head_mask) |
|
if self.output_attentions: |
|
all_attentions, _, pooled_output = outputs |
|
else: |
|
_, pooled_output = outputs |
|
pooled_output = self.dropout(pooled_output) |
|
logits = self.classifier(pooled_output) |
|
loss = None |
|
if labels is not None: |
|
loss_fct = CrossEntropyLoss() |
|
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
return loss, logits |
|
elif self.output_attentions: |
|
return all_attentions, logits |
|
return loss, logits |
|
|
|
|
|
@dataclass |
|
class TokenClassifierOutput: |
|
""" |
|
Base class for outputs of token classification models. |
|
""" |
|
|
|
loss: Optional[torch.FloatTensor] = None |
|
logits: torch.FloatTensor = None |
|
|
|
|
|
class ZenForTokenClassification(ZenPreTrainedModel): |
|
"""ZEN model for token-level classification. |
|
This module is composed of the ZEN model with a linear layer on top of |
|
the full hidden state of the last layer. |
|
|
|
Params: |
|
`config`: a BertConfig class instance with the configuration to build a new model |
|
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False |
|
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. |
|
This can be used to compute head importance metrics. Default: False |
|
`num_labels`: the number of classes for the classifier (the model actually reads `config.num_labels`; this argument is kept for backward compatibility).
|
|
|
Inputs: |
|
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] |
|
with the word token indices in the vocabulary |
|
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token |
|
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to |
|
a `sentence B` token (see BERT paper for more details). |
|
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices |
|
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max |
|
input sequence length in the current batch. It's the mask that we typically use for attention when |
|
a batch has varying length sentences. |
|
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] |
|
with indices selected in [0, ..., num_labels - 1].
|
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. |
|
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
|
`input_ngram_ids`: input_ids of ngrams. |
|
`ngram_token_type_ids`: token_type_ids of ngrams. |
|
`ngram_attention_mask`: attention_mask of ngrams. |
|
`ngram_position_matrix`: position matrix of ngrams. |
|
|
|
Outputs: |
|
if `labels` is not `None`:
    Outputs a `TokenClassifierOutput` with the CrossEntropy classification loss and logits.
if `labels` is `None`:
    Outputs a `TokenClassifierOutput` whose logits have shape [batch_size, sequence_length, num_labels].
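Example usage (a minimal sketch; note that the n-gram inputs are keyword
arguments on this head):
```python
model = ZenForTokenClassification(config)
output = model(input_ids, labels=labels, input_ngram_ids=input_ngram_ids,
               ngram_position_matrix=ngram_position_matrix)
loss, logits = output.loss, output.logits
```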
|
|
|
""" |
|
|
|
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False): |
|
super(ZenForTokenClassification, self).__init__(config) |
|
self.output_attentions = output_attentions |
|
self.num_labels = config.num_labels |
|
self.bert = ZenModel(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
self.classifier = nn.Linear(config.hidden_size, self.num_labels) |
|
self.init_weights() |
|
|
|
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, valid_ids=None, |
|
input_ngram_ids=None, ngram_position_matrix=None, head_mask=None, b_use_valid_filter=False): |
|
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids, |
|
attention_mask=attention_mask, output_all_encoded_layers=False, head_mask=head_mask) |
|
if self.output_attentions: |
|
all_attentions, sequence_output, _ = outputs |
|
else: |
|
sequence_output, _ = outputs |
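# NOTE: valid_ids and b_use_valid_filter are accepted for API compatibility
# but no sub-token filtering is applied here; every position of
# sequence_output is classified.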
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
valid_output = sequence_output |
|
|
|
sequence_output = self.dropout(valid_output) |
|
logits = self.classifier(sequence_output) |
|
loss = None |
|
if labels is not None: |
|
loss_fct = CrossEntropyLoss(ignore_index=0) |
|
|
|
|
|
|
|
if attention_mask is not None: |
|
|
|
active_loss = attention_mask.view(-1) == 1 |
|
active_logits = logits.view(-1, self.num_labels)[active_loss] |
|
active_labels = labels.view(-1)[active_loss] |
|
loss = loss_fct(active_logits, active_labels) |
|
else: |
|
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
return TokenClassifierOutput(loss, logits)
|
|
|
|
|
class ZenForQuestionAnswering(ZenPreTrainedModel): |
|
"""BERT model for Question Answering (span extraction). |
|
This module is composed of the BERT model with a linear layer on top of |
|
the sequence output that computes start_logits and end_logits |
|
|
|
Params: |
|
`config`: a BertConfig class instance with the configuration to build a new model |
|
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False |
|
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. |
|
This can be used to compute head importance metrics. Default: False |
|
|
|
Inputs: |
|
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] |
|
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
|
`extract_features.py`, `run_classifier.py` and `run_squad.py`) |
|
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token |
|
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to |
|
a `sentence B` token (see BERT paper for more details). |
|
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices |
|
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max |
|
input sequence length in the current batch. It's the mask that we typically use for attention when |
|
a batch has varying length sentences. |
|
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. |
|
Positions are clamped to the length of the sequence and position outside of the sequence are not taken |
|
into account for computing the loss. |
|
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. |
|
Positions are clamped to the length of the sequence and position outside of the sequence are not taken |
|
into account for computing the loss. |
|
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. |
|
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
|
|
|
Outputs: |
|
if `start_positions` and `end_positions` are not `None`: |
|
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. |
|
if `start_positions` or `end_positions` is `None`: |
|
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end |
|
position tokens of shape [batch_size, sequence_length]. |
|
|
|
Example usage (values are illustrative; real inputs come from a ZEN tokenizer and
n-gram matcher, and `config` is assumed to be a ZenConfig instance):
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
input_ngram_ids = torch.LongTensor([[2, 7], [4, 0]])
ngram_position_matrix = torch.zeros(2, 3, 2)  # [batch, seq_len, num_ngrams]

model = ZenForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, input_ngram_ids, ngram_position_matrix,
                                 token_type_ids, input_mask)
```
|
""" |
|
|
|
def __init__(self, config, output_attentions=False, keep_multihead_output=False): |
|
super(ZenForQuestionAnswering, self).__init__(config) |
|
self.output_attentions = output_attentions |
|
self.bert = ZenModel(config, output_attentions=output_attentions, |
|
keep_multihead_output=keep_multihead_output) |
|
self.qa_outputs = nn.Linear(config.hidden_size, 2) |
|
self.init_weights() |
|
|
|
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, start_positions=None, |
|
end_positions=None, head_mask=None): |
|
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids, |
|
attention_mask=attention_mask, |
|
output_all_encoded_layers=False, |
|
head_mask=head_mask) |
|
if self.output_attentions: |
|
all_attentions, sequence_output, _ = outputs |
|
else: |
|
sequence_output, _ = outputs |
|
logits = self.qa_outputs(sequence_output) |
|
start_logits, end_logits = logits.split(1, dim=-1) |
|
start_logits = start_logits.squeeze(-1) |
|
end_logits = end_logits.squeeze(-1) |
|
|
|
if start_positions is not None and end_positions is not None: |
|
|
|
if len(start_positions.size()) > 1: |
|
start_positions = start_positions.squeeze(-1) |
|
if len(end_positions.size()) > 1: |
|
end_positions = end_positions.squeeze(-1) |
|
|
|
ignored_index = start_logits.size(1) |
|
start_positions.clamp_(0, ignored_index) |
|
end_positions.clamp_(0, ignored_index) |
|
|
|
loss_fct = CrossEntropyLoss(ignore_index=ignored_index) |
|
start_loss = loss_fct(start_logits, start_positions) |
|
end_loss = loss_fct(end_logits, end_positions) |
|
total_loss = (start_loss + end_loss) / 2 |
|
return total_loss |
|
elif self.output_attentions: |
|
return all_attentions, start_logits, end_logits |
|
return start_logits, end_logits |
|
|