|
|
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from einops import rearrange
from huggingface_hub import PyTorchModelHubMixin
from safetensors.torch import load_file
from scipy import stats
from torch import Tensor
from torch.nn import RMSNorm
from transformers import T5EncoderModel
from transformers.modeling_outputs import (
    CausalLMOutput,
    CausalLMOutputWithCrossAttentions,
    ModelOutput,
)

from config import Config
from convnext.convnext import ConvNeXtV2, IdentityConvNeXtV2
from diffloss import DiffLoss
from state import DecoderInferenceState, EncoderInferenceState, KVCache
from text_encoder.model import T5Encoder
|
|
@dataclass |
|
|
class QuoteTTSOutput(ModelOutput): |
|
|
""" |
|
|
Base class for masked language models outputs. |
|
|
|
|
|
Args: |
|
|
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): |
|
|
Masked language modeling (MLM) loss. |
|
|
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): |
|
|
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
|
|
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
|
|
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + |
|
|
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. |
|
|
|
|
|
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
|
|
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
|
|
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
|
|
sequence_length)`. |
|
|
|
|
|
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention |
|
|
heads. |
|
|
""" |
|
|
|
|
|
loss: Optional[torch.FloatTensor] = None |
|
|
mask_loss: Optional[torch.FloatTensor] = None |
|
|
logits: Optional[torch.FloatTensor] = None |
|
|
labels: Optional[torch.FloatTensor] = None |
|
|
expressive_latents: Optional[torch.FloatTensor] = None |
|
|
labels_latents: Optional[torch.FloatTensor] = None |
|
|
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None |
|
|
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None |
|
|
cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None |
|
|
target_mask: Optional[Tuple[torch.FloatTensor, ...]] = None |
|
|
mu: Optional[torch.FloatTensor] = None |
|
|
logvar: Optional[torch.FloatTensor] = None |
|
|
|
|
|
|
|
|
class SinusoidalPosEmb(nn.Module): |
|
|
def __init__(self, dim): |
|
|
super().__init__() |
|
|
self.dim = dim |
|
|
|
|
|
def forward(self, x): |
|
|
device = x.device |
|
|
half_dim = self.dim // 2 |
|
|
emb = math.log(10000) / (half_dim - 1) |
|
|
emb = torch.exp(torch.arange(half_dim, device=device) * -emb) |
|
|
emb = x[:, None] * emb[None, :] * 1.0 |
|
|
emb = torch.cat((emb.sin(), emb.cos()), dim=-1) |
|
|
return emb |
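

# Illustrative usage sketch (not part of the model; names and shapes are arbitrary):
# SinusoidalPosEmb maps a batch of scalar timesteps (B,) to embeddings of shape
# (B, dim) by concatenating sin/cos features at geometrically spaced frequencies.
def _demo_sinusoidal_pos_emb():
    emb = SinusoidalPosEmb(dim=128)
    t = torch.rand(4)        # e.g. mask-ratio timesteps in [0, 1]
    return emb(t).shape      # torch.Size([4, 128])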
|
|
|
|
|
|
|
|
|
|
|
def _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]: |
|
|
return tuple(ax if ax >= 0 else ndim + ax for ax in axes) |
|
|
|
|
|
class DenseGeneral(nn.Module): |
|
|
""" |
|
|
PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init. |
|
|
|
|
|
Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot |
|
|
for the generalized matrix multiplication. Weight/bias shapes are calculated |
|
|
and parameters created during initialization based on config. |
|
|
`load_weights` validates shapes and copies data. |
|
|
|
|
|
Attributes: |
|
|
axis (Tuple[int, ...]): Input axis or axes to contract. |
|
|
in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`. |
|
|
out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims). |
|
|
use_bias (bool): Whether to add a bias term. |
|
|
weight (nn.Parameter): The kernel parameter. |
|
|
bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True). |
|
|
""" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
in_shapes: tuple[int, ...], |
|
|
out_features: tuple[int, ...], |
|
|
axis: tuple[int, ...] = (-1,), |
|
|
|
|
|
|
|
|
): |
|
|
super().__init__() |
|
|
self.in_shapes = in_shapes |
|
|
self.out_features = out_features |
|
|
self.axis = axis |
|
|
self.kernel_shape = self.in_shapes + self.out_features |
|
|
|
|
|
|
|
|
self.weight = nn.Parameter(torch.empty(self.kernel_shape)) |
|
|
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) |
|
|
|
|
|
|
|
|
def forward(self, inputs: Tensor) -> Tensor: |
|
|
norm_axis = _normalize_axes(self.axis, inputs.ndim) |
|
|
kernel_contract_axes = tuple(range(len(norm_axis))) |
|
|
|
|
|
output = torch.tensordot( |
|
|
inputs.to(self.weight.dtype), |
|
|
self.weight, |
|
|
dims=(norm_axis, kernel_contract_axes), |
|
|
).to(inputs.dtype) |
|
|
return output |
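

# Illustrative usage sketch (not part of the model; shapes are arbitrary examples):
# DenseGeneral contracts the axes listed in `axis` against the leading dims of its
# kernel via torch.tensordot, so a (B, T, D) input with out_features=(H, K) yields
# a (B, T, H, K) output.
def _demo_dense_general():
    proj = DenseGeneral(in_shapes=(64,), out_features=(8, 16), axis=(-1,))
    x = torch.randn(2, 10, 64)
    return proj(x).shape     # torch.Size([2, 10, 8, 16])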
|
|
|
|
|
|
|
|
class MlpBlock(nn.Module): |
|
|
"""MLP block using DenseGeneral.""" |
|
|
|
|
|
def __init__(self, embed_dim: int, intermediate_dim: int, out_dim:int=None): |
|
|
super().__init__() |
|
|
|
|
|
self.wi_fused = DenseGeneral( |
|
|
in_shapes=(embed_dim,), |
|
|
out_features=(2, intermediate_dim), |
|
|
axis=(-1,), |
|
|
) |
|
|
if out_dim is None : |
|
|
out_dim = embed_dim |
|
|
|
|
|
self.wo = DenseGeneral( |
|
|
in_shapes=(intermediate_dim,), |
|
|
out_features=(out_dim,), |
|
|
axis=(-1,), |
|
|
) |
|
|
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
|
"""Forward pass.""" |
|
|
fused_x = self.wi_fused(x) |
|
|
|
|
|
gate = fused_x[..., 0, :] |
|
|
up = fused_x[..., 1, :] |
|
|
|
|
|
hidden = torch.mul(F.silu(gate), up) |
|
|
|
|
|
output = self.wo(hidden) |
|
|
return output |
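

# Illustrative usage sketch (not part of the model; sizes are arbitrary):
# MlpBlock is a gated (SwiGLU-style) feed-forward block: the fused `wi` projection
# produces a gate and an up path, combined as silu(gate) * up before `wo`.
def _demo_mlp_block():
    mlp = MlpBlock(embed_dim=64, intermediate_dim=256)
    x = torch.randn(2, 10, 64)
    return mlp(x).shape      # torch.Size([2, 10, 64])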
|
|
|
|
|
|
|
|
class LlamaAdaptiveRMSNorm(nn.Module): |
|
|
def __init__(self, hidden_size=1024, eps=1e-6, dim_cond=1024): |
|
|
super().__init__() |
|
|
self.to_weight = nn.Linear(dim_cond, hidden_size) |
|
|
nn.init.zeros_(self.to_weight.weight) |
|
|
nn.init.ones_(self.to_weight.bias) |
|
|
self.variance_epsilon = eps |
|
|
self._is_hf_initialized = True |
|
|
|
|
|
def forward(self, hidden_states, cond_embedding): |
|
|
input_dtype = hidden_states.dtype |
|
|
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) |
|
|
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
|
|
|
|
|
weight = self.to_weight(cond_embedding) |
|
|
if len(weight.shape) == 2: |
|
|
weight = weight.unsqueeze(1) |
|
|
|
|
|
return (weight * hidden_states).to(input_dtype) |
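

# Illustrative usage sketch (not part of the model; sizes are arbitrary):
# LlamaAdaptiveRMSNorm rescales RMS-normalized hidden states with a per-example
# weight predicted from a conditioning embedding (here just random noise).
def _demo_adaptive_rmsnorm():
    norm = LlamaAdaptiveRMSNorm(hidden_size=64, dim_cond=64)
    h = torch.randn(2, 10, 64)
    cond = torch.randn(2, 64)                   # broadcast over the sequence dimension
    return norm(h, cond_embedding=cond).shape   # torch.Size([2, 10, 64])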
|
|
|
|
|
|
|
|
class RotaryEmbedding(nn.Module): |
|
|
"""Rotary Position Embedding (RoPE) implementation in PyTorch.""" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
embedding_dims: int, |
|
|
min_timescale: int = 1, |
|
|
max_timescale: int = 10000, |
|
|
): |
|
|
super().__init__() |
|
|
if embedding_dims % 2 != 0: |
|
|
raise ValueError("Embedding dim must be even for RoPE.") |
|
|
self.embedding_dims = embedding_dims |
|
|
self.min_timescale = min_timescale |
|
|
self.max_timescale = max_timescale |
|
|
|
|
|
half_embedding_dim = embedding_dims // 2 |
|
|
fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims |
|
|
timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32) |
|
|
self.register_buffer("timescale", timescale, persistent=False) |
|
|
|
|
|
def forward(self, inputs: torch.Tensor, position: torch.Tensor): |
|
|
"""Applies RoPE.""" |
|
|
position = position.unsqueeze(-1).unsqueeze(-1) |
|
|
sinusoid_inp = position / self.timescale |
|
|
sin = torch.sin(sinusoid_inp) |
|
|
cos = torch.cos(sinusoid_inp) |
|
|
first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1) |
|
|
first_part = first_half * cos - second_half * sin |
|
|
second_part = second_half * cos + first_half * sin |
|
|
return torch.cat((first_part, second_part), dim=-1) |
|
|
|
|
|
def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor): |
|
|
first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1) |
|
|
first_part = first_half * cos - second_half * sin |
|
|
second_part = second_half * cos + first_half * sin |
|
|
return torch.cat((first_part, second_part), dim=-1) |
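

# Illustrative usage sketch (not part of the model; sizes are arbitrary):
# RotaryEmbedding rotates feature pairs as a function of position. It expects
# inputs shaped (B, T, num_heads, head_dim) and positions shaped (B, T).
def _demo_rotary_embedding():
    rope = RotaryEmbedding(embedding_dims=32)
    q = torch.randn(2, 10, 4, 32)
    pos = torch.arange(10).unsqueeze(0).expand(2, -1)
    return rope(q, position=pos).shape      # torch.Size([2, 10, 4, 32])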
|
|
|
|
|
class selfAttention(nn.Module): |
|
|
"""Attention using DenseGeneral.""" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
config, |
|
|
q_embed_dim: int, |
|
|
kv_embed_dim: int, |
|
|
num_query_heads: int, |
|
|
num_kv_heads: int, |
|
|
head_dim: int, |
|
|
is_cross_attn: bool = False, |
|
|
out_embed_dim: int = None, |
|
|
output_attentions=False, |
|
|
): |
|
|
super().__init__() |
|
|
self.num_query_heads = num_query_heads |
|
|
self.num_kv_heads = num_kv_heads |
|
|
self.head_dim = head_dim |
|
|
self.is_cross_attn = is_cross_attn |
|
|
self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim |
|
|
self.projected_query_dim = num_query_heads * head_dim |
|
|
if num_query_heads % num_kv_heads != 0: |
|
|
raise ValueError(f"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})") |
|
|
self.num_gqa_groups = num_query_heads // num_kv_heads |
|
|
self.kv_embed_dim = kv_embed_dim |
|
|
self.q_embed_dim = q_embed_dim |
|
|
self.output_attentions = output_attentions |
|
|
self.dropout_rate = config.model.dropout_rate |
|
|
|
|
|
|
|
|
|
|
|
self.q_proj = DenseGeneral( |
|
|
in_shapes=(q_embed_dim,), |
|
|
out_features=(num_query_heads, head_dim), |
|
|
axis=(-1,), |
|
|
) |
|
|
self.k_proj = DenseGeneral( |
|
|
in_shapes=(kv_embed_dim,), |
|
|
out_features=(num_kv_heads, head_dim), |
|
|
axis=(-1,), |
|
|
) |
|
|
self.v_proj = DenseGeneral( |
|
|
in_shapes=(kv_embed_dim,), |
|
|
out_features=(num_kv_heads, head_dim), |
|
|
axis=(-1,), |
|
|
) |
|
|
self.o_proj = DenseGeneral( |
|
|
in_shapes=(num_query_heads, head_dim), |
|
|
out_features=(self.output_dim,), |
|
|
axis=(-2, -1), |
|
|
) |
|
|
|
|
|
|
|
|
self.rotary_emb = RotaryEmbedding( |
|
|
embedding_dims=self.head_dim, |
|
|
min_timescale=config.model.rope_min_timescale, |
|
|
max_timescale=config.model.rope_max_timescale, |
|
|
) |
|
|
|
|
|
self.is_fused_qkv = False |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
X: torch.Tensor, |
|
|
q_positions: torch.Tensor, |
|
|
kv_positions: torch.Tensor = None, |
|
|
attn_mask: torch.Tensor = None, |
|
|
cache: KVCache = None, |
|
|
prefill: bool = False, |
|
|
        is_causal: bool = False,
        current_idx: torch.Tensor = None,
|
|
) : |
|
|
""" |
|
|
Performs attention calculation with optional KV caching. |
|
|
|
|
|
Args: |
|
|
Xq: Query tensor (B, T, D). T=1 during single-step decoding. |
|
|
Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn. |
|
|
q_positions: Positions for queries (B, T). |
|
|
kv_positions: Positions for keys/values (B, S). If None, uses q_positions. |
|
|
attn_mask: Attention mask. |
|
|
cache: KVCache. |
|
|
prefill: If True, use prefill mode. |
|
|
|
|
|
Returns: |
|
|
A tuple containing: |
|
|
- output: The attention output tensor (B, T, output_dim). |
|
|
- present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv. |
|
|
""" |
|
|
if kv_positions is None: |
|
|
kv_positions = q_positions |
|
|
|
|
|
original_dtype = X.dtype |
|
|
|
|
|
|
|
|
Xq_BxTxNxH = self.q_proj(X) |
|
|
Xk_BxSxKxH = self.k_proj(X) |
|
|
Xv_BxSxKxH = self.v_proj(X) |
|
|
|
|
|
position = q_positions.unsqueeze(-1).unsqueeze(-1) |
|
|
sinusoid_inp = position / self.rotary_emb.timescale |
|
|
sin = torch.sin(sinusoid_inp) |
|
|
cos = torch.cos(sinusoid_inp) |
|
|
|
|
|
Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos) |
|
|
Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos) |
|
|
|
|
|
Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2) |
|
|
|
|
|
attn_k = None |
|
|
attn_v = None |
|
|
|
|
|
Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) |
|
|
Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) |
|
|
|
|
|
if cache is None: |
|
|
attn_k = Xk_BxKxSxH |
|
|
attn_v = Xv_BxKxSxH |
|
|
        else:
            # KV-cache decode path: current_idx marks where this step's keys/values
            # are written into the cache.
            attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
attn_output = F.scaled_dot_product_attention( |
|
|
Xq_BxNxTxH, |
|
|
attn_k, |
|
|
attn_v, |
|
|
attn_mask=attn_mask if not is_causal else None, |
|
|
scale=None, |
|
|
enable_gqa=self.num_gqa_groups > 1, |
|
|
is_causal=is_causal, |
|
|
dropout_p=self.dropout_rate if self.training else 0.0 |
|
|
) |
|
|
|
|
|
attn_output = attn_output.transpose(1, 2).contiguous() |
|
|
output = self.o_proj(attn_output) |
|
|
|
|
|
return output.to(original_dtype) |
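

# Illustrative usage sketch (not part of the model): selfAttention applies RoPE to
# queries/keys and runs F.scaled_dot_product_attention. `_cfg` below is a minimal,
# hypothetical stand-in for the real Config object.
def _demo_self_attention():
    from types import SimpleNamespace
    _cfg = SimpleNamespace(model=SimpleNamespace(
        dropout_rate=0.0, rope_min_timescale=1, rope_max_timescale=10000))
    attn = selfAttention(_cfg, q_embed_dim=64, kv_embed_dim=64,
                         num_query_heads=4, num_kv_heads=4, head_dim=16)
    x = torch.randn(2, 10, 64)
    pos = torch.arange(10).unsqueeze(0).expand(2, -1)
    return attn(x, q_positions=pos, is_causal=True).shape   # torch.Size([2, 10, 64])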
|
|
|
|
|
class CrossAttention(nn.Module): |
|
|
"""Cross-Attention using DenseGeneral.""" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
config, |
|
|
q_embed_dim: int, |
|
|
kv_embed_dim: int, |
|
|
num_query_heads: int, |
|
|
num_kv_heads: int, |
|
|
head_dim: int, |
|
|
out_embed_dim: int = None, |
|
|
output_attentions=False |
|
|
): |
|
|
super().__init__() |
|
|
self.num_query_heads = num_query_heads |
|
|
self.num_kv_heads = num_kv_heads |
|
|
self.head_dim = head_dim |
|
|
self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim |
|
|
self.projected_query_dim = num_query_heads * head_dim |
|
|
if num_query_heads % num_kv_heads != 0: |
|
|
raise ValueError(f"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})") |
|
|
self.num_gqa_groups = num_query_heads // num_kv_heads |
|
|
self.output_attentions=output_attentions |
|
|
self.dropout_rate = config.model.dropout_rate |
|
|
|
|
|
self.q_proj = DenseGeneral( |
|
|
in_shapes=(q_embed_dim,), |
|
|
out_features=(num_query_heads, head_dim), |
|
|
axis=(-1,), |
|
|
) |
|
|
self.k_proj = DenseGeneral( |
|
|
in_shapes=(kv_embed_dim,), |
|
|
out_features=(num_kv_heads, head_dim), |
|
|
axis=(-1,), |
|
|
) |
|
|
self.v_proj = DenseGeneral( |
|
|
in_shapes=(kv_embed_dim,), |
|
|
out_features=(num_kv_heads, head_dim), |
|
|
axis=(-1,), |
|
|
) |
|
|
self.o_proj = DenseGeneral( |
|
|
in_shapes=(num_query_heads, head_dim), |
|
|
out_features=(self.output_dim,), |
|
|
axis=(-2, -1), |
|
|
) |
|
|
|
|
|
|
|
|
self.rotary_emb = RotaryEmbedding( |
|
|
embedding_dims=self.head_dim, |
|
|
min_timescale=config.model.rope_min_timescale, |
|
|
max_timescale=config.model.rope_max_timescale, |
|
|
) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
Xq: torch.Tensor, |
|
|
q_positions: torch.Tensor, |
|
|
Xkv: torch.Tensor = None, |
|
|
kv_positions: torch.Tensor = None, |
|
|
attn_mask: torch.Tensor = None, |
|
|
cache: KVCache = None, |
|
|
is_causal: bool = False, |
|
|
): |
|
|
""" |
|
|
Performs attention calculation with optional KV caching. |
|
|
|
|
|
Args: |
|
|
Xq: Query tensor (B, T, D). T=1 during single-step decoding. |
|
|
Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn. |
|
|
q_positions: Positions for queries (B, T). |
|
|
kv_positions: Positions for keys/values (B, S). If None, uses q_positions. |
|
|
attn_mask: Attention mask. |
|
|
cache: KVCache. |
|
|
|
|
|
Returns: |
|
|
A tuple containing: |
|
|
- output: The attention output tensor (B, T, output_dim). |
|
|
- present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv. |
|
|
""" |
|
|
if kv_positions is None: |
|
|
kv_positions = q_positions |
|
|
original_dtype = Xq.dtype |
|
|
|
|
|
Xq_BxTxNxH = self.q_proj(Xq) |
|
|
Xq_BxTxNxH = self.rotary_emb(Xq_BxTxNxH, position=q_positions) |
|
|
Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2) |
|
|
|
|
|
attn_k = None |
|
|
attn_v = None |
|
|
if cache is not None : |
|
|
attn_k, attn_v = cache.k, cache.v |
|
|
else : |
|
|
attn_k = self.k_proj(Xkv) |
|
|
attn_v = self.v_proj(Xkv) |
|
|
attn_k = self.rotary_emb(attn_k, position=kv_positions) |
|
|
attn_k = attn_k.transpose(1, 2) |
|
|
attn_v = attn_v.transpose(1, 2) |
|
|
|
|
|
attn_output = F.scaled_dot_product_attention( |
|
|
Xq_BxNxTxH, |
|
|
attn_k, |
|
|
attn_v, |
|
|
attn_mask=attn_mask if not is_causal else None, |
|
|
scale=None, |
|
|
enable_gqa=self.num_gqa_groups > 1, |
|
|
is_causal=is_causal, |
|
|
dropout_p=self.dropout_rate if self.training else 0.0 |
|
|
) |
|
|
        if self.output_attentions:
            # scaled_dot_product_attention does not expose attention weights, so
            # approximate them by projecting the output through the pseudo-inverse of V.
            attn_weight = attn_output @ torch.linalg.pinv(attn_v)
|
|
|
|
|
attn_output = attn_output.transpose(1, 2).contiguous() |
|
|
output = self.o_proj(attn_output) |
|
|
|
|
|
        if self.output_attentions:
            return output.to(original_dtype), attn_weight
        return output.to(original_dtype)
|
|
|
|
|
|
|
|
class EncoderLayer(nn.Module): |
|
|
"""Transformer Encoder Layer using DenseGeneral.""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
model_config = config.model |
|
|
enc_config = config.model.encoder |
|
|
embed_dim = enc_config.n_embd |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.pre_sa_norm = LlamaAdaptiveRMSNorm( |
|
|
hidden_size=embed_dim, dim_cond=embed_dim |
|
|
) |
|
|
self.self_attention = selfAttention( |
|
|
config, |
|
|
q_embed_dim=embed_dim, |
|
|
kv_embed_dim=embed_dim, |
|
|
num_query_heads=enc_config.n_head, |
|
|
num_kv_heads=enc_config.n_head, |
|
|
head_dim=enc_config.head_dim, |
|
|
is_cross_attn=False, |
|
|
out_embed_dim=embed_dim, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.post_sa_norm = LlamaAdaptiveRMSNorm( |
|
|
hidden_size=embed_dim, dim_cond=embed_dim |
|
|
) |
|
|
self.mlp = MlpBlock(embed_dim=embed_dim, intermediate_dim=enc_config.n_hidden) |
|
|
self.dropout = nn.Dropout(config.model.dropout_rate) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
x: torch.Tensor, |
|
|
state: EncoderInferenceState, |
|
|
cond_emb: torch.Tensor = None |
|
|
) -> torch.Tensor: |
|
|
|
|
|
residual = x |
|
|
x_norm = self.pre_sa_norm(x, cond_embedding=cond_emb) |
|
|
|
|
|
sa_out = self.self_attention( |
|
|
X=x_norm, |
|
|
q_positions=state.positions, |
|
|
kv_positions=state.positions, |
|
|
attn_mask=state.attn_mask, |
|
|
) |
|
|
x = residual + self.dropout(sa_out) |
|
|
|
|
|
residual = x |
|
|
x_norm = self.post_sa_norm(x, cond_embedding=cond_emb) |
|
|
mlp_out = self.mlp(x_norm) |
|
|
x = residual + self.dropout(mlp_out) |
|
|
|
|
|
return x |
|
|
|
|
|
|
|
|
class Decoder(nn.Module): |
|
|
"""Transformer Decoder Stack using DenseGeneral.""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
model_config = config.model |
|
|
dec_config = config.model.decoder |
|
|
data_config = config.data |
|
|
self.num_layers = dec_config.n_layer |
|
|
|
|
|
|
|
|
self.mask_ratio_generator = stats.truncnorm((config.model.mask_ratio_min - 1.0) / 0.25, 0, loc=1.0, scale=0.25) |
|
|
|
|
|
self.sep_emb = nn.Parameter(torch.zeros(1, 1, dec_config.n_embd)) |
|
|
torch.nn.init.normal_(self.sep_emb, std=.02) |
|
|
|
|
|
self.mask_emb = nn.Parameter(torch.zeros(1, config.model.inp_dim)) |
|
|
torch.nn.init.normal_(self.mask_emb, std=.02) |
|
|
|
|
|
self.embedding_dense = DenseGeneral( |
|
|
in_shapes=(dec_config.inp_dim,), |
|
|
out_features=(1, dec_config.n_embd), |
|
|
axis=(-1,), |
|
|
) |
|
|
|
|
|
self.layers = nn.ModuleList( |
|
|
[DecoderLayer(config=config) for _ in range(self.num_layers)] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        self.norm = LlamaAdaptiveRMSNorm(
            hidden_size=dec_config.n_embd, dim_cond=dec_config.n_embd
        )
|
|
self.dropout = nn.Dropout(config.model.dropout_rate) |
|
|
|
|
|
self.reconstructor = MlpBlock( |
|
|
embed_dim=dec_config.n_embd, |
|
|
intermediate_dim=dec_config.n_hidden, |
|
|
out_dim = dec_config.inp_dim |
|
|
) |
|
|
|
|
|
|
|
|
def get_ids(self, text_input_ids, max_len, pad_value=1): |
|
|
bs, seq_len = text_input_ids.size() |
|
|
padding_size = max_len - seq_len |
|
|
|
|
|
padding_tensor = torch.empty(bs, padding_size, device=text_input_ids.device).fill_(pad_value).long() |
|
|
return torch.cat((text_input_ids, padding_tensor), dim=1) |
|
|
|
|
|
def mask_prob(self, t): |
|
|
return torch.sin(t * np.pi / 2).to(t.device) |
|
|
|
|
|
def get_t(self, x0) : |
|
|
t = torch.rand(x0.shape[0], device=x0.device, requires_grad=False) |
|
|
t = torch.clamp(t, 1e-5, 1.0) |
|
|
return t |
|
|
|
|
|
def mask_tgt_embeddings(self, x0): |
|
|
|
|
|
t = self.get_t(x0) |
|
|
new_t = t |
|
|
mask_prob = self.mask_prob(new_t) |
|
|
|
|
|
mask_prob = torch.where( |
|
|
mask_prob < 0.2, torch.ones_like(mask_prob) * 0.2, mask_prob |
|
|
) |
|
|
|
|
|
target_mask = torch.bernoulli(torch.ones_like(x0[:, :, 0]) * mask_prob[..., None]) |
|
|
|
|
|
|
|
|
|
|
|
xt = x0.clone() |
|
|
|
|
|
|
|
|
|
|
|
xt[(target_mask==1)] = self.mask_emb.to(xt.dtype) |
|
|
|
|
|
return xt, target_mask |
|
|
|
|
|
def random_masking(self, x, orders): |
|
|
|
|
|
bsz, seq_len, embed_dim = x.shape |
|
|
mask_rate = self.mask_ratio_generator.rvs(1)[0] |
|
|
num_masked_tokens = int(np.ceil(seq_len * mask_rate)) |
|
|
mask = torch.zeros(bsz, seq_len, device=x.device) |
|
|
mask = torch.scatter(mask, dim=-1, index=orders[:, :num_masked_tokens].to(x.device), |
|
|
src=torch.ones(bsz, seq_len, device=x.device)) |
|
|
return mask.long() |
|
|
def sample_orders(self, bsz, seq_len =32 ): |
|
|
|
|
|
orders = [] |
|
|
for _ in range(bsz): |
|
|
order = np.array(list(range(seq_len))) |
|
|
np.random.shuffle(order) |
|
|
orders.append(order) |
|
|
orders = torch.Tensor(np.array(orders)).long() |
|
|
return orders |
|
|
|
|
|
def mask_input(self, x) : |
|
|
bs, seq_len, _ = x.size() |
|
|
orders = self.sample_orders(bs,seq_len) |
|
|
mask = self.random_masking(x, orders) |
|
|
xt = x.clone() |
|
|
xt[(mask==1)] = self.mask_emb.to(xt.dtype) |
|
|
return xt, mask |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
enc_state: EncoderInferenceState, |
|
|
enc_out: torch.Tensor, |
|
|
quote_embs: torch.Tensor, |
|
|
dec_in : torch.Tensor, |
|
|
text_input_ids: torch.Tensor, |
|
|
labels:torch.Tensor = None) -> torch.Tensor: |
|
|
|
|
|
if self.training : |
|
|
|
|
|
|
|
|
x, target_mask = self.mask_input(dec_in) |
|
|
|
|
|
|
|
|
else : |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
target_mask=None |
|
|
x = dec_in |
|
|
|
|
|
x = self.embedding_dense(x).squeeze(2) |
|
|
|
|
|
|
|
|
|
|
|
x = torch.cat((quote_embs, self.sep_emb.repeat(x.size(0), 1,1).to(x.dtype), x), dim = 1) |
|
|
|
|
|
dec_in_dummy = self.get_ids(text_input_ids, max_len = x.size(1)) |
|
|
|
|
|
state = DecoderInferenceState.new( |
|
|
self.config, enc_state, enc_out, dec_in_dummy |
|
|
) |
|
|
|
|
|
cross_attentions = () |
|
|
for i, layer in enumerate(self.layers): |
|
|
x, cattns = layer(x, state) |
|
|
cross_attentions += (cattns,) |
|
|
|
|
|
|
|
|
x = self.norm(x) |
|
|
|
|
|
|
|
|
|
|
|
reconstructed_input = self.reconstructor(self.dropout(x[:,-32:])).squeeze(2) |
|
|
|
|
|
if self.training : |
|
|
loss1 = F.mse_loss( |
|
|
reconstructed_input[:,-32:][(target_mask==1)], |
|
|
dec_in[(target_mask==1)], |
|
|
reduction="mean", |
|
|
) |
|
|
loss2 = F.l1_loss( |
|
|
reconstructed_input[:,-32:][(target_mask==1)], |
|
|
dec_in[(target_mask==1)], |
|
|
reduction="mean", |
|
|
) |
|
|
mask_loss = loss1 + loss2 |
|
|
else : |
|
|
if labels is not None : |
|
|
loss1 = F.mse_loss( |
|
|
reconstructed_input[:,-32:], |
|
|
labels, |
|
|
reduction="mean", |
|
|
) |
|
|
loss2 = F.l1_loss( |
|
|
reconstructed_input[:,-32:], |
|
|
labels, |
|
|
reduction="mean", |
|
|
) |
|
|
mask_loss = loss1 + loss2 |
|
|
else : |
|
|
mask_loss = None |
|
|
|
|
|
|
|
|
|
|
|
out = QuoteTTSOutput( |
|
|
logits=x, |
|
|
mask_loss=mask_loss, |
|
|
cross_attentions=cross_attentions, |
|
|
expressive_latents=reconstructed_input, |
|
|
target_mask=target_mask) |
|
|
return out |
|
|
|
|
|
|
|
|
class Encoder(nn.Module): |
|
|
"""Transformer Decoder Stack using DenseGeneral.""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
model_config = config.model |
|
|
dec_config = config.model.decoder |
|
|
data_config = config.data |
|
|
self.num_layers = dec_config.n_layer |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.embedding_dense = nn.Linear(dec_config.inp_dim, dec_config.n_embd, bias=True) |
|
|
torch.nn.init.xavier_uniform_(self.embedding_dense.weight) |
|
|
torch.nn.init.constant_(self.embedding_dense.bias, 0) |
|
|
|
|
|
self.sep_emb = nn.Parameter(torch.zeros(1, 1, dec_config.n_embd)) |
|
|
torch.nn.init.normal_(self.sep_emb, std=.02) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.layers = nn.ModuleList( |
|
|
[EncoderLayer(config=config) for _ in range(self.num_layers)] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        self.norm = LlamaAdaptiveRMSNorm(
            hidden_size=dec_config.n_embd, dim_cond=dec_config.n_embd
        )
|
|
self.dropout = nn.Dropout(config.model.dropout_rate) |
|
|
|
|
|
def get_ids(self, text_input_ids, max_len, pad_value=1): |
|
|
bs, seq_len = text_input_ids.size() |
|
|
padding_size = max_len - seq_len |
|
|
|
|
|
padding_tensor = torch.empty(bs, padding_size, device=text_input_ids.device).fill_(pad_value).long() |
|
|
return torch.cat((text_input_ids, padding_tensor), dim=1) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
context_embs: torch.Tensor, |
|
|
audio_in : torch.Tensor, |
|
|
ref_in : torch.Tensor, |
|
|
text_input_ids: torch.Tensor, |
|
|
mask: torch.Tensor) -> torch.Tensor: |
|
|
|
|
|
|
|
|
bsz, seq_len, embed_dim = context_embs.shape |
|
|
|
|
|
x = self.embedding_dense(audio_in).squeeze(2) |
|
|
|
|
|
|
|
|
x = torch.cat((context_embs, self.sep_emb.repeat(bsz, 1,1).to(x.dtype), x), dim = 1) |
|
|
|
|
|
|
|
|
mask_with_buffer = torch.cat([torch.zeros(bsz, seq_len + 1, device=x.device), mask], dim=1) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
x = x[(1-mask_with_buffer).nonzero(as_tuple=True)].reshape(bsz, -1, embed_dim) |
|
|
|
|
|
|
|
|
enc_in_dummy = self.get_ids(text_input_ids, max_len = x.size(1)) |
|
|
state = EncoderInferenceState.new( |
|
|
self.config, enc_in_dummy |
|
|
) |
|
|
|
|
|
|
|
|
for i, layer in enumerate(self.layers): |
|
|
x = layer(x, state) |
|
|
|
|
|
|
|
|
|
|
|
x = self.norm(x) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return x |
|
|
|
|
|
|
|
|
class MaskedEncoder(nn.Module): |
|
|
"""Transformer Decoder Stack using DenseGeneral.""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
model_config = config.model |
|
|
dec_config = config.model.decoder |
|
|
data_config = config.data |
|
|
self.num_layers = dec_config.n_layer |
|
|
|
|
|
|
|
|
self.mask_token = nn.Parameter(torch.zeros(1, 1, dec_config.n_embd)) |
|
|
torch.nn.init.normal_(self.mask_token, std=.02) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.embedding_dense = nn.Linear(dec_config.inp_dim, dec_config.n_embd, bias=True) |
|
|
torch.nn.init.xavier_uniform_(self.embedding_dense.weight) |
|
|
torch.nn.init.constant_(self.embedding_dense.bias, 0) |
|
|
|
|
|
self.layers = nn.ModuleList( |
|
|
[EncoderLayer(config=config) for _ in range(self.num_layers)] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.norm = LlamaAdaptiveRMSNorm( |
|
|
hidden_size=dec_config.n_embd, dim_cond=dec_config.n_embd |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_ids(self, text_input_ids, max_len, pad_value=1): |
|
|
bs, seq_len = text_input_ids.size() |
|
|
padding_size = max_len - seq_len |
|
|
|
|
|
padding_tensor = torch.empty(bs, padding_size, device=text_input_ids.device).fill_(pad_value).long() |
|
|
return torch.cat((text_input_ids, padding_tensor), dim=1) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
audio_in: torch.Tensor, |
|
|
context_embs: torch.Tensor, |
|
|
mask: torch.Tensor, |
|
|
text_input_ids : torch.Tensor, |
|
|
cond_emb: torch.Tensor) -> torch.Tensor: |
|
|
|
|
|
bsz, seq_len = text_input_ids.shape |
|
|
|
|
|
x = self.embedding_dense(audio_in).squeeze(2) |
|
|
|
|
|
mask_with_buffer = torch.cat([torch.zeros(bsz, seq_len, device=x.device), mask], dim=1) |
|
|
|
|
|
|
|
|
x = torch.cat((context_embs, x), dim = 1) |
|
|
|
|
|
x[(mask_with_buffer).nonzero(as_tuple=True)] = self.mask_token.to(x.dtype) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
enc_in_dummy = self.get_ids(text_input_ids, max_len = x.size(1)) |
|
|
state = EncoderInferenceState.new( |
|
|
self.config, enc_in_dummy |
|
|
) |
|
|
|
|
|
|
|
|
for i, layer in enumerate(self.layers): |
|
|
x = layer(x, state, cond_emb=cond_emb) |
|
|
|
|
|
|
|
|
|
|
|
x = self.norm(x, cond_embedding=cond_emb) |
|
|
|
|
|
|
|
|
x = x[:,-32:] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return x |
|
|
|
|
|
class EncoderDecoder(nn.Module):
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
self.mask_ratio_generator = stats.truncnorm((config.model.mask_ratio_min - 1.0) / 0.25, 0, loc=1.0, scale=0.25) |
|
|
|
|
|
|
|
|
self.context_encoder = T5EncoderModel.from_pretrained(config.model.base_encoder_path) |
|
|
for p in self.context_encoder.parameters(): |
|
|
p.requires_grad = False |
|
|
self.context_encoder = self.context_encoder.eval() |
|
|
|
|
|
|
|
|
self.decoder = MaskedEncoder(config) |
|
|
|
|
|
self.diffloss = DiffLoss( |
|
|
target_channels=config.model.inp_dim, |
|
|
z_channels=1024, |
|
|
width=1024, |
|
|
depth=8, |
|
|
num_sampling_steps='100', |
|
|
grad_checkpointing=False |
|
|
) |
|
|
|
|
|
self.mask_step_embedding = SinusoidalPosEmb(config.model.decoder.n_embd) |
|
|
self.mask_step_mlp = nn.Sequential( |
|
|
nn.Linear(config.model.decoder.n_embd, config.model.decoder.n_embd * 4), |
|
|
nn.SiLU(), |
|
|
nn.Linear(config.model.decoder.n_embd * 4, config.model.decoder.n_embd), |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def random_masking(self, x, orders): |
|
|
|
|
|
bsz, seq_len, embed_dim = x.shape |
|
|
mask_rate = self.mask_ratio_generator.rvs(1)[0] |
|
|
num_masked_tokens = int(np.ceil(seq_len * mask_rate)) |
|
|
mask = torch.zeros(bsz, seq_len, device=x.device) |
|
|
mask = torch.scatter(mask, dim=-1, index=orders[:, :num_masked_tokens].to(x.device), |
|
|
src=torch.ones(bsz, seq_len, device=x.device)) |
|
|
return mask.long() |
|
|
|
|
|
def sample_orders(self, bsz, seq_len =32 ): |
|
|
|
|
|
orders = [] |
|
|
for _ in range(bsz): |
|
|
order = np.array(list(range(seq_len))) |
|
|
np.random.shuffle(order) |
|
|
orders.append(order) |
|
|
orders = torch.Tensor(np.array(orders)).long() |
|
|
return orders |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def mask_prob(self, t): |
|
|
return torch.sin(t * np.pi / 2).to(t.device) |
|
|
|
|
|
def get_mask(self, bsz, device, seq_len=32) : |
|
|
t = torch.rand(bsz, device=device, requires_grad=False) |
|
|
t = torch.clamp(t, 1e-5, 1.0) |
|
|
mask_prob = self.mask_prob(t) |
|
|
mask_prob = torch.where( |
|
|
mask_prob < 0.2, torch.ones_like(mask_prob) * 0.2, mask_prob |
|
|
) |
|
|
mask = torch.bernoulli(torch.ones(bsz, seq_len, device=device) * mask_prob[..., None]).long() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return t, mask |
|
|
|
|
|
def forward_loss(self, z, target, mask, diffusion_batch_mul=4): |
|
|
bsz, seq_len, _ = target.shape |
|
|
target = target.reshape(bsz * seq_len, -1).repeat(diffusion_batch_mul, 1) |
|
|
z = z.reshape(bsz*seq_len, -1).repeat(diffusion_batch_mul, 1) |
|
|
mask = mask.reshape(bsz*seq_len).repeat(diffusion_batch_mul) |
|
|
loss = self.diffloss(z=z, target=target, mask=mask) |
|
|
return loss |
|
|
|
|
|
def get_diff_t(self, x) : |
|
|
|
|
|
return torch.randint(0, self.diffloss.train_diffusion.num_timesteps, (x.shape[0] * 32, ), device=x.device) |
|
|
|
|
|
def _forward( |
|
|
self, |
|
|
context: torch.Tensor, |
|
|
quote: torch.Tensor, |
|
|
dec_in_ref: torch.Tensor, |
|
|
transformer_in : torch.Tensor, |
|
|
dec_in_tgt: torch.Tensor, |
|
|
labels: torch.Tensor = None |
|
|
) : |
|
|
|
|
|
bsz, _ = context.size() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
mask_t, mask = self.get_mask(bsz, device=context.device) |
|
|
mask_perc_emb = self.mask_step_embedding(mask_t) |
|
|
mask_perc_emb = self.mask_step_mlp(mask_perc_emb) |
|
|
|
|
|
|
|
|
enc_state = EncoderInferenceState.new(self.context_encoder.config, context) |
|
|
enc_out = self.context_encoder(input_ids=context,attention_mask=enc_state.padding_mask).last_hidden_state |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
z = self.decoder( |
|
|
transformer_in, |
|
|
enc_out, |
|
|
mask, |
|
|
context, |
|
|
cond_emb=mask_perc_emb) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
gt_latents = transformer_in.clone().detach() |
|
|
loss = self.forward_loss(z, gt_latents, mask) |
|
|
|
|
|
out = QuoteTTSOutput( |
|
|
logits=z, |
|
|
loss=loss, |
|
|
expressive_latents=None) |
|
|
return out |
|
|
|
|
|
|
|
|
def forward( |
|
|
self, |
|
|
context: torch.Tensor, |
|
|
quote: torch.Tensor, |
|
|
dec_in_ref: torch.Tensor, |
|
|
transformer_in: torch.Tensor = None, |
|
|
dec_in_tgt: torch.Tensor = None, |
|
|
labels: torch.Tensor = None): |
|
|
|
|
|
|
|
|
|
|
|
if self.training : |
|
|
return self._forward( |
|
|
context=context, |
|
|
quote=quote, |
|
|
dec_in_ref=dec_in_ref, |
|
|
dec_in_tgt=dec_in_tgt, |
|
|
transformer_in=transformer_in, |
|
|
labels=labels) |
|
|
else : |
|
|
samples, z = self.sample_tokens(context, quote, dec_in_ref, num_iter=1) |
|
|
|
|
|
mask = torch.ones(z.size(0), z.size(1), device=context.device).long() |
|
|
|
|
|
loss = self.forward_loss(z, transformer_in, mask, diffusion_batch_mul=1) |
|
|
|
|
|
out = QuoteTTSOutput( |
|
|
logits=samples, |
|
|
loss=loss, |
|
|
labels=transformer_in) |
|
|
return out |
|
|
|
|
|
@classmethod |
|
|
def from_pretrained(cls, path: str, config_path: str= None): |
|
|
if config_path: |
|
|
with open(config_path) as f : |
|
|
config = yaml.safe_load(f) |
|
|
else : |
|
|
config = Config() |
|
|
|
|
|
model = cls(config) |
|
|
model.load_state_dict(torch.load(os.path.join(path, "pytorch_model.bin"), map_location="cpu")) |
|
|
return model |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@torch.no_grad() |
|
|
def sample_tokens(self, context, quote, dec_in_ref, num_iter=10, seq_len=32, temperature=1.0): |
|
|
|
|
|
bsz = context.size(0) |
|
|
enc_state = EncoderInferenceState.new(self.context_encoder.config, context) |
|
|
enc_out = self.context_encoder(input_ids=context,attention_mask=enc_state.padding_mask).last_hidden_state |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tokens = torch.zeros(bsz, seq_len, dec_in_ref.size(2), device=context.device) |
|
|
|
|
|
mask = torch.ones(bsz, seq_len, device=context.device) |
|
|
orders = self.sample_orders(bsz) |
|
|
|
|
|
|
|
|
h = 1.0/num_iter |
|
|
t_list = [1.0 - i * h for i in range(num_iter)] |
|
|
t_list.append(0.0) |
|
|
|
|
|
|
|
|
for step in range(num_iter): |
|
|
cur_tokens = tokens.clone() |
|
|
t = t_list[step] * torch.ones(bsz).to(mask.device) |
|
|
mask_perc_emb = self.mask_step_embedding(t) |
|
|
mask_perc_emb = self.mask_step_mlp(mask_perc_emb) |
|
|
|
|
|
|
|
|
|
|
|
z = self.decoder(cur_tokens, enc_out, mask, context, cond_emb=mask_perc_emb) |
|
|
|
|
|
|
|
|
mask_ratio = torch.Tensor([t_list[step+1]]).to(context.device) |
|
|
|
|
|
|
|
|
mask_len = (self.mask_prob(mask_ratio) * seq_len).long() |
|
|
|
|
|
|
|
|
mask_len = torch.maximum(torch.Tensor([1]).to(context.device), |
|
|
torch.minimum(torch.sum(mask, dim=-1, keepdims=True) - 1, mask_len)) |
|
|
|
|
|
|
|
|
mask_next = mask_by_order(mask_len[0], orders, bsz, seq_len).to(cur_tokens.device) |
|
|
if step >= num_iter - 1: |
|
|
mask_to_pred = mask[:bsz].bool() |
|
|
else: |
|
|
mask_to_pred = torch.logical_xor(mask[:bsz].bool(), mask_next.bool()) |
|
|
mask = mask_next |
|
|
|
|
|
full_z = z.clone() |
|
|
z = z[mask_to_pred.nonzero(as_tuple=True)] |
|
|
|
|
|
|
|
|
sampled_token_latent = self.diffloss.sample(z, temperature, cfg=1.0) |
|
|
cur_tokens[mask_to_pred.nonzero(as_tuple=True)] = sampled_token_latent |
|
|
tokens = cur_tokens.clone() |
|
|
|
|
|
return tokens, full_z |
|
|
|
|
|
def mask_by_order(mask_len, order, bsz, seq_len): |
|
|
masking = torch.zeros(bsz, seq_len) |
|
|
masking = torch.scatter(masking, dim=-1, index=order[:, :mask_len.long()], src=torch.ones(bsz, seq_len)).bool() |
|
|
return masking |
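

# Illustrative usage sketch (not part of the model): mask_by_order marks the first
# `mask_len` entries of each random order as masked, which drives the iterative
# unmasking schedule in sample_tokens.
def _demo_mask_by_order():
    order = torch.argsort(torch.rand(2, 8), dim=-1)     # one random order per row
    mask = mask_by_order(torch.tensor(3), order, bsz=2, seq_len=8)
    return mask.sum(dim=-1)                             # tensor([3, 3])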
|
|
|
|
|
|
|
|
def top_p_sample(logits, thres=0.9): |
|
|
k = math.ceil((1 - thres) * logits.shape[-1]) |
|
|
val, ind = logits.topk(k, dim=-1) |
|
|
probs = torch.full_like(logits, float("-inf")) |
|
|
probs.scatter_(2, ind, val) |
|
|
return probs |
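

# Illustrative usage sketch (not part of the model): despite its name, top_p_sample
# keeps the ceil((1 - thres) * vocab) highest logits per position and sets the rest
# to -inf, i.e. a top-k style filter over a (B, T, V) logits tensor.
def _demo_top_p_sample():
    logits = torch.randn(2, 5, 100)
    filtered = top_p_sample(logits, thres=0.9)          # 10 finite logits remain
    return torch.isfinite(filtered).sum(dim=-1)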
|
|
|
|
|
|
|
|
def log(t, eps=1e-10): |
|
|
return torch.log(t + eps) |
|
|
|
|
|
|
|
|
def gumbel_noise(t): |
|
|
noise = torch.zeros_like(t).uniform_(0, 1) |
|
|
return -log(-log(noise)) |
|
|
|
|
|
|
|
|
def gumbel_sample(t, temperature=1.0, dim=-1): |
|
|
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim=dim) |
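

# Illustrative usage sketch (not part of the model): gumbel_sample draws categorical
# samples from logits via the Gumbel-max trick; as temperature goes to zero it
# approaches a plain argmax.
def _demo_gumbel_sample():
    logits = torch.randn(2, 5, 100)
    idx = gumbel_sample(logits, temperature=1.0, dim=-1)
    return idx.shape                                    # torch.Size([2, 5])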
|
|
|
|
|
|
|
|
def apply_top_k_only( |
|
|
logits: torch.Tensor, |
|
|
k: torch.Tensor, |
|
|
) -> torch.Tensor: |
|
|
""" |
|
|
Apply top-k mask to the logits. |
|
|
|
|
|
This implementation doesn't involve sorting the entire vocab. |
|
|
|
|
|
The logits tensor may be updated in-place. |
|
|
""" |
|
|
no_top_k_mask = k == logits.shape[1] |
|
|
|
|
|
k = k.masked_fill(no_top_k_mask, 1) |
|
|
max_top_k = k.max() |
|
|
|
|
|
|
|
|
k_index = k.sub_(1).unsqueeze(1) |
|
|
top_k_mask = logits.topk(max_top_k, dim=1).values.gather(1, k_index.long()) |
|
|
|
|
|
top_k_mask.masked_fill_(no_top_k_mask.unsqueeze(1), -float("inf")) |
|
|
logits.masked_fill_(logits < top_k_mask, -float("inf")) |
|
|
return logits |
|
|
|
|
|
def apply_top_k_top_p( |
|
|
logits: torch.Tensor, |
|
|
k: Optional[torch.Tensor], |
|
|
p: Optional[torch.Tensor], |
|
|
) -> torch.Tensor: |
|
|
"""Apply top-k and top-p masks to the logits. |
|
|
|
|
|
If a top-p is used, this function will sort the logits tensor, |
|
|
which can be slow for large batches. |
|
|
|
|
|
The logits tensor may be updated in-place. |
|
|
""" |
|
|
if p is None: |
|
|
if k is None: |
|
|
return logits |
|
|
|
|
|
|
|
|
return apply_top_k_only(logits, k) |
|
|
|
|
|
logits_sort, logits_idx = logits.sort(dim=-1, descending=False) |
|
|
|
|
|
if k is not None: |
|
|
|
|
|
top_k_mask = logits_sort.size(1) - k.to(torch.long) |
|
|
|
|
|
top_k_mask = logits_sort.gather(1, top_k_mask.unsqueeze(dim=1)) |
|
|
top_k_mask = logits_sort < top_k_mask |
|
|
logits_sort.masked_fill_(top_k_mask, -float("inf")) |
|
|
|
|
|
if p is not None: |
|
|
|
|
|
probs_sort = logits_sort.softmax(dim=-1) |
|
|
probs_sum = torch.cumsum(probs_sort, dim=-1, out=probs_sort) |
|
|
top_p_mask = probs_sum <= 1 - p.unsqueeze(dim=1) |
|
|
|
|
|
top_p_mask[:, -1] = False |
|
|
logits_sort.masked_fill_(top_p_mask, -float("inf")) |
|
|
|
|
|
|
|
|
logits = logits_sort.scatter(dim=-1, index=logits_idx, src=logits_sort) |
|
|
return logits |
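

# Illustrative usage sketch (not part of the model): apply_top_k_top_p masks a
# (batch, vocab) logits tensor so that only the top-k / nucleus-p candidates keep
# finite scores before multinomial sampling. Values below are arbitrary.
def _demo_apply_top_k_top_p():
    logits = torch.randn(2, 50)
    k = torch.tensor([10, 10])
    p = torch.tensor([0.9, 0.9])
    masked = apply_top_k_top_p(logits.clone(), k, p)
    return torch.isfinite(masked).sum(dim=-1)           # at most 10 per row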
|
|
|
|
|
def _sample_next_token( |
|
|
logits_BCxV: torch.Tensor, |
|
|
temperature: float, |
|
|
top_p: float, |
|
|
top_k: int |
|
|
): |
|
|
if temperature in [0, None]: |
|
|
return torch.argmax(logits_BCxV, dim=-1) |
|
|
|
|
|
logits_BCxV = logits_BCxV / temperature |
|
|
logits = apply_top_k_top_p(logits_BCxV, torch.tensor([top_k]), torch.tensor([top_p])) |
|
|
|
|
|
final_probs_BCxV = torch.softmax(logits, dim=-1) |
|
|
|
|
|
sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1) |
|
|
sampled_indices_C = sampled_indices_BC.squeeze(-1) |
|
|
return sampled_indices_C |
|