# QuoteTTS / layers_diffusion_4.py
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml  # needed by EncoderDecoder.from_pretrained
from einops import rearrange
from huggingface_hub import PyTorchModelHubMixin
from safetensors.torch import load_file
from scipy import stats
from torch import Tensor
from torch.nn import RMSNorm
from transformers import T5EncoderModel
from transformers.modeling_outputs import (
    CausalLMOutput,
    CausalLMOutputWithCrossAttentions,
    ModelOutput,
)

from config import Config
from convnext.convnext import ConvNeXtV2, IdentityConvNeXtV2
from diffloss import DiffLoss
from state import DecoderInferenceState, EncoderInferenceState, KVCache
from text_encoder.model import T5Encoder
@dataclass
class QuoteTTSOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
mask_loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
labels: Optional[torch.FloatTensor] = None
expressive_latents: Optional[torch.FloatTensor] = None
labels_latents: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
target_mask: Optional[Tuple[torch.FloatTensor, ...]] = None
mu: Optional[torch.FloatTensor] = None
logvar: Optional[torch.FloatTensor] = None
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
    def forward(self, x):
        # x: (B,) scalar timesteps -> (B, dim) sinusoidal embeddings.
        device = x.device
        half_dim = self.dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
        emb = x[:, None] * emb[None, :]
        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
        return emb
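# A minimal usage sketch (illustrative, not part of the model): SinusoidalPosEmb
# maps a batch of scalar timesteps to fixed sinusoidal features, as used for the
# mask-ratio conditioning further below. Shapes are assumptions for the demo.
def _demo_sinusoidal_pos_emb():
    emb = SinusoidalPosEmb(dim=128)
    t = torch.rand(4)              # four timesteps in [0, 1)
    out = emb(t)                   # (4, 128): [sin | cos] halves
    assert out.shape == (4, 128)
    return out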
def _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:
return tuple(ax if ax >= 0 else ndim + ax for ax in axes)
class DenseGeneral(nn.Module):
"""
PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.
Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot
for the generalized matrix multiplication. Weight/bias shapes are calculated
and parameters created during initialization based on config.
`load_weights` validates shapes and copies data.
Attributes:
axis (Tuple[int, ...]): Input axis or axes to contract.
in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.
out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).
use_bias (bool): Whether to add a bias term.
weight (nn.Parameter): The kernel parameter.
bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True).
"""
def __init__(
self,
in_shapes: tuple[int, ...],
out_features: tuple[int, ...],
axis: tuple[int, ...] = (-1,),
#weight_dtype: torch.dtype = None,
#device: torch.device = None,
):
super().__init__()
self.in_shapes = in_shapes
self.out_features = out_features
self.axis = axis
self.kernel_shape = self.in_shapes + self.out_features
# factory_kwargs = {"device": device, "dtype": weight_dtype}
self.weight = nn.Parameter(torch.empty(self.kernel_shape))
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
# torch.nn.init.normal_(self.weight, std=.02)
def forward(self, inputs: Tensor) -> Tensor:
norm_axis = _normalize_axes(self.axis, inputs.ndim)
kernel_contract_axes = tuple(range(len(norm_axis)))
output = torch.tensordot(
inputs.to(self.weight.dtype),
self.weight,
dims=(norm_axis, kernel_contract_axes),
).to(inputs.dtype)
return output
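# Sketch of what DenseGeneral computes (illustrative, not used by the model):
# with axis=(-1,) it reduces to an ordinary bias-free linear layer, so the
# tensordot above matches a plain matmul against the (in, out) kernel.
def _demo_dense_general():
    layer = DenseGeneral(in_shapes=(8,), out_features=(4,), axis=(-1,))
    x = torch.randn(2, 5, 8)
    out = layer(x)                 # (2, 5, 4)
    ref = x @ layer.weight         # equivalent matmul against the (8, 4) kernel
    assert torch.allclose(out, ref, atol=1e-6)
    return out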
class MlpBlock(nn.Module):
"""MLP block using DenseGeneral."""
def __init__(self, embed_dim: int, intermediate_dim: int, out_dim:int=None):
super().__init__()
self.wi_fused = DenseGeneral(
in_shapes=(embed_dim,),
out_features=(2, intermediate_dim),
axis=(-1,),
)
if out_dim is None :
out_dim = embed_dim
self.wo = DenseGeneral(
in_shapes=(intermediate_dim,),
out_features=(out_dim,),
axis=(-1,),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass."""
fused_x = self.wi_fused(x)
gate = fused_x[..., 0, :]
up = fused_x[..., 1, :]
hidden = torch.mul(F.silu(gate), up)
output = self.wo(hidden)
return output
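# Illustrative check (assumption: this is the standard SwiGLU-style gating):
# wi_fused packs the gate and up projections into one kernel, and the block
# computes wo(silu(gate) * up).
def _demo_mlp_block():
    mlp = MlpBlock(embed_dim=8, intermediate_dim=16)
    x = torch.randn(2, 3, 8)
    out = mlp(x)
    assert out.shape == (2, 3, 8)  # out_dim defaults to embed_dim
    return out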
class LlamaAdaptiveRMSNorm(nn.Module):
def __init__(self, hidden_size=1024, eps=1e-6, dim_cond=1024):
super().__init__()
self.to_weight = nn.Linear(dim_cond, hidden_size)
nn.init.zeros_(self.to_weight.weight)
nn.init.ones_(self.to_weight.bias)
self.variance_epsilon = eps
self._is_hf_initialized = True # disable automatic init
    def forward(self, hidden_states, cond_embedding=None):
        input_dtype = hidden_states.dtype
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        if cond_embedding is None:
            # Without conditioning, fall back to a plain (unscaled) RMSNorm.
            return hidden_states.to(input_dtype)
        weight = self.to_weight(cond_embedding)
        if len(weight.shape) == 2:
            weight = weight.unsqueeze(1)
        return (weight * hidden_states).to(input_dtype)
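# Sketch: because to_weight is initialized with zero weights and unit bias, the
# module starts out as a plain RMSNorm and learns a conditional scale from
# cond_embedding during training. Sizes below are illustrative.
def _demo_adaptive_rmsnorm():
    norm = LlamaAdaptiveRMSNorm(hidden_size=8, dim_cond=8)
    h = torch.randn(2, 5, 8)
    cond = torch.randn(2, 8)       # one conditioning vector per sequence
    out = norm(h, cond)            # at init: identical to vanilla RMSNorm
    rms = h / torch.sqrt(h.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(out, rms, atol=1e-5)
    return out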
class RotaryEmbedding(nn.Module):
"""Rotary Position Embedding (RoPE) implementation in PyTorch."""
def __init__(
self,
embedding_dims: int,
min_timescale: int = 1,
max_timescale: int = 10000,
):
super().__init__()
if embedding_dims % 2 != 0:
raise ValueError("Embedding dim must be even for RoPE.")
self.embedding_dims = embedding_dims
self.min_timescale = min_timescale
self.max_timescale = max_timescale
half_embedding_dim = embedding_dims // 2
fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims
timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)
self.register_buffer("timescale", timescale, persistent=False)
def forward(self, inputs: torch.Tensor, position: torch.Tensor):
"""Applies RoPE."""
position = position.unsqueeze(-1).unsqueeze(-1)
sinusoid_inp = position / self.timescale
sin = torch.sin(sinusoid_inp)
cos = torch.cos(sinusoid_inp)
first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)
first_part = first_half * cos - second_half * sin
second_part = second_half * cos + first_half * sin
return torch.cat((first_part, second_part), dim=-1)
def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor):
first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)
first_part = first_half * cos - second_half * sin
second_part = second_half * cos + first_half * sin
return torch.cat((first_part, second_part), dim=-1)
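# Illustrative property check: RoPE rotates (first_half, second_half) pairs,
# so it preserves the norm of each head vector. Shapes are assumptions.
def _demo_rope():
    rope = RotaryEmbedding(embedding_dims=8)
    x = torch.randn(2, 5, 4, 8)                      # (B, T, heads, head_dim)
    pos = torch.arange(5).unsqueeze(0).repeat(2, 1)  # (B, T)
    out = rope(x, pos)
    assert torch.allclose(out.pow(2).sum(-1), x.pow(2).sum(-1), atol=1e-4)
    return out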
class selfAttention(nn.Module):
"""Attention using DenseGeneral."""
def __init__(
self,
config,
q_embed_dim: int,
kv_embed_dim: int,
num_query_heads: int,
num_kv_heads: int,
head_dim: int,
is_cross_attn: bool = False,
out_embed_dim: int = None,
output_attentions=False,
):
super().__init__()
self.num_query_heads = num_query_heads
self.num_kv_heads = num_kv_heads
self.head_dim = head_dim
self.is_cross_attn = is_cross_attn
self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim
self.projected_query_dim = num_query_heads * head_dim
if num_query_heads % num_kv_heads != 0:
raise ValueError(f"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})")
self.num_gqa_groups = num_query_heads // num_kv_heads
self.kv_embed_dim = kv_embed_dim
self.q_embed_dim = q_embed_dim
self.output_attentions = output_attentions
self.dropout_rate = config.model.dropout_rate
# self.dropout = nn.Dropout(config.dropout_rate)
# --- Projection Layers using DenseGeneral ---
self.q_proj = DenseGeneral(
in_shapes=(q_embed_dim,),
out_features=(num_query_heads, head_dim),
axis=(-1,),
)
self.k_proj = DenseGeneral(
in_shapes=(kv_embed_dim,),
out_features=(num_kv_heads, head_dim),
axis=(-1,),
)
self.v_proj = DenseGeneral(
in_shapes=(kv_embed_dim,),
out_features=(num_kv_heads, head_dim),
axis=(-1,),
)
self.o_proj = DenseGeneral(
in_shapes=(num_query_heads, head_dim),
out_features=(self.output_dim,),
axis=(-2, -1),
)
# --- Rotary Embedding ---
self.rotary_emb = RotaryEmbedding(
embedding_dims=self.head_dim,
min_timescale=config.model.rope_min_timescale,
max_timescale=config.model.rope_max_timescale,
)
self.is_fused_qkv = False
    def forward(
        self,
        X: torch.Tensor,  # (B, T, D); T = 1 in AR generation
        q_positions: torch.Tensor,  # (B, T)
        kv_positions: torch.Tensor = None,  # (B, S)
        attn_mask: torch.Tensor = None,  # None in decoder self-attention, valid mask otherwise
        cache: KVCache = None,  # None in encoder, KVCache in decoder
        prefill: bool = False,
        is_causal: bool = False,
        current_idx: torch.Tensor = None,  # write position for the KV cache
    ):
        """
        Performs self-attention with optional KV caching.

        Args:
            X: Input tensor (B, T, D). T=1 during single-step decoding.
            q_positions: Positions for queries (B, T).
            kv_positions: Positions for keys/values (B, S). If None, uses q_positions.
            attn_mask: Attention mask.
            cache: KVCache; when provided, current_idx gives the slot to update.
            prefill: If True, use prefill mode.
            is_causal: If True, rely on SDPA's built-in causal masking.

        Returns:
            The attention output tensor (B, T, output_dim).
        """
        if kv_positions is None:
            kv_positions = q_positions
        original_dtype = X.dtype

        Xq_BxTxNxH = self.q_proj(X)
        Xk_BxSxKxH = self.k_proj(X)
        Xv_BxSxKxH = self.v_proj(X)

        position = q_positions.unsqueeze(-1).unsqueeze(-1)
        sinusoid_inp = position / self.rotary_emb.timescale
        sin = torch.sin(sinusoid_inp)
        cos = torch.cos(sinusoid_inp)
        Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos)
        Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos)

        Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)
        Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2)  # (B, K, S, H)
        Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2)  # (B, K, S, H)

        if cache is None:
            attn_k = Xk_BxKxSxH
            attn_v = Xv_BxKxSxH
        else:
            attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)

        attn_output = F.scaled_dot_product_attention(
            Xq_BxNxTxH,
            attn_k,
            attn_v,
            attn_mask=attn_mask if not is_causal else None,
            scale=None,
            enable_gqa=self.num_gqa_groups > 1,
            is_causal=is_causal,
            dropout_p=self.dropout_rate if self.training else 0.0,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()  # (B, T, N, H)
        output = self.o_proj(attn_output)
        return output.to(original_dtype)
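# Minimal smoke test (illustrative; the real config comes from config.Config).
# A stand-in namespace with the attributes selfAttention actually reads is
# enough to exercise the no-cache, causal path.
def _demo_self_attention():
    from types import SimpleNamespace
    cfg = SimpleNamespace(model=SimpleNamespace(
        dropout_rate=0.0, rope_min_timescale=1, rope_max_timescale=10000))
    attn = selfAttention(cfg, q_embed_dim=16, kv_embed_dim=16,
                         num_query_heads=4, num_kv_heads=4, head_dim=8)
    x = torch.randn(2, 6, 16)
    pos = torch.arange(6).unsqueeze(0).repeat(2, 1)
    out = attn(x, q_positions=pos, is_causal=True)
    assert out.shape == (2, 6, 16)
    return out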
class CrossAttention(nn.Module):
"""Cross-Attention using DenseGeneral."""
def __init__(
self,
config,
q_embed_dim: int,
kv_embed_dim: int,
num_query_heads: int,
num_kv_heads: int,
head_dim: int,
out_embed_dim: int = None,
output_attentions=False
):
super().__init__()
self.num_query_heads = num_query_heads
self.num_kv_heads = num_kv_heads
self.head_dim = head_dim
self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim
self.projected_query_dim = num_query_heads * head_dim
if num_query_heads % num_kv_heads != 0:
raise ValueError(f"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})")
self.num_gqa_groups = num_query_heads // num_kv_heads
self.output_attentions=output_attentions
self.dropout_rate = config.model.dropout_rate
# --- Projection Layers using DenseGeneral ---
self.q_proj = DenseGeneral(
in_shapes=(q_embed_dim,),
out_features=(num_query_heads, head_dim),
axis=(-1,),
)
self.k_proj = DenseGeneral(
in_shapes=(kv_embed_dim,),
out_features=(num_kv_heads, head_dim),
axis=(-1,),
)
self.v_proj = DenseGeneral(
in_shapes=(kv_embed_dim,),
out_features=(num_kv_heads, head_dim),
axis=(-1,),
)
self.o_proj = DenseGeneral(
in_shapes=(num_query_heads, head_dim),
out_features=(self.output_dim,),
axis=(-2, -1),
)
# --- Rotary Embedding ---
self.rotary_emb = RotaryEmbedding(
embedding_dims=self.head_dim,
min_timescale=config.model.rope_min_timescale,
max_timescale=config.model.rope_max_timescale,
)
def forward(
self,
Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation
q_positions: torch.Tensor, # (B, T),
Xkv: torch.Tensor = None, # (B, S)
kv_positions: torch.Tensor = None, # (B, S)
attn_mask: torch.Tensor = None, # None in Decoder self Attention, Valid mask in Others
cache: KVCache = None, # None in Encoder, KVCache in Decoder
is_causal: bool = False,
):
"""
Performs attention calculation with optional KV caching.
Args:
Xq: Query tensor (B, T, D). T=1 during single-step decoding.
Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.
q_positions: Positions for queries (B, T).
kv_positions: Positions for keys/values (B, S). If None, uses q_positions.
attn_mask: Attention mask.
cache: KVCache.
Returns:
A tuple containing:
- output: The attention output tensor (B, T, output_dim).
- present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.
"""
if kv_positions is None:
kv_positions = q_positions
original_dtype = Xq.dtype
Xq_BxTxNxH = self.q_proj(Xq)
Xq_BxTxNxH = self.rotary_emb(Xq_BxTxNxH, position=q_positions)
Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)
attn_k = None
attn_v = None
if cache is not None :
attn_k, attn_v = cache.k, cache.v
else :
attn_k = self.k_proj(Xkv)
attn_v = self.v_proj(Xkv)
attn_k = self.rotary_emb(attn_k, position=kv_positions)
attn_k = attn_k.transpose(1, 2)
attn_v = attn_v.transpose(1, 2)
        attn_output = F.scaled_dot_product_attention(
            Xq_BxNxTxH,
            attn_k,
            attn_v,
            attn_mask=attn_mask if not is_causal else None,
            scale=None,
            enable_gqa=self.num_gqa_groups > 1,
            is_causal=is_causal,
            dropout_p=self.dropout_rate if self.training else 0.0,
        )
        if self.output_attentions:
            # SDPA does not expose attention weights, so approximately recover
            # them by right-multiplying with the pseudo-inverse of the values.
            attn_weight = attn_output @ torch.linalg.pinv(attn_v)
        attn_output = attn_output.transpose(1, 2).contiguous()  # (B, T, N, H)
        output = self.o_proj(attn_output)
        if self.output_attentions:
            return output.to(original_dtype), attn_weight
        return output.to(original_dtype)
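# Minimal smoke test for the cross-attention path (illustrative stand-in
# config, as in the self-attention sketch above).
def _demo_cross_attention():
    from types import SimpleNamespace
    cfg = SimpleNamespace(model=SimpleNamespace(
        dropout_rate=0.0, rope_min_timescale=1, rope_max_timescale=10000))
    xattn = CrossAttention(cfg, q_embed_dim=16, kv_embed_dim=32,
                           num_query_heads=4, num_kv_heads=4, head_dim=8)
    xq = torch.randn(2, 6, 16)
    xkv = torch.randn(2, 9, 32)
    q_pos = torch.arange(6).unsqueeze(0).repeat(2, 1)
    kv_pos = torch.arange(9).unsqueeze(0).repeat(2, 1)
    out = xattn(xq, q_positions=q_pos, Xkv=xkv, kv_positions=kv_pos)
    assert out.shape == (2, 6, 16)
    return out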
class EncoderLayer(nn.Module):
"""Transformer Encoder Layer using DenseGeneral."""
def __init__(self, config):
super().__init__()
self.config = config
model_config = config.model
enc_config = config.model.encoder
embed_dim = enc_config.n_embd
        self.pre_sa_norm = LlamaAdaptiveRMSNorm(
            hidden_size=embed_dim, dim_cond=embed_dim
        )
self.self_attention = selfAttention(
config,
q_embed_dim=embed_dim,
kv_embed_dim=embed_dim,
num_query_heads=enc_config.n_head,
num_kv_heads=enc_config.n_head,
head_dim=enc_config.head_dim,
is_cross_attn=False,
out_embed_dim=embed_dim,
)
        self.post_sa_norm = LlamaAdaptiveRMSNorm(
            hidden_size=embed_dim, dim_cond=embed_dim
        )
self.mlp = MlpBlock(embed_dim=embed_dim, intermediate_dim=enc_config.n_hidden)
self.dropout = nn.Dropout(config.model.dropout_rate)
def forward(
self,
x: torch.Tensor,
state: EncoderInferenceState,
cond_emb: torch.Tensor = None
) -> torch.Tensor:
residual = x
x_norm = self.pre_sa_norm(x, cond_embedding=cond_emb)
sa_out = self.self_attention(
X=x_norm,
q_positions=state.positions,
kv_positions=state.positions,
attn_mask=state.attn_mask,
)
x = residual + self.dropout(sa_out)
residual = x
x_norm = self.post_sa_norm(x, cond_embedding=cond_emb)
mlp_out = self.mlp(x_norm)
x = residual + self.dropout(mlp_out)
return x
class Decoder(nn.Module):
"""Transformer Decoder Stack using DenseGeneral."""
def __init__(self, config):
super().__init__()
self.config = config
model_config = config.model
dec_config = config.model.decoder
data_config = config.data
self.num_layers = dec_config.n_layer
# self.embeddings = nn.Embedding(model_config.tgt_vocab_size, dec_config.n_embd)
self.mask_ratio_generator = stats.truncnorm((config.model.mask_ratio_min - 1.0) / 0.25, 0, loc=1.0, scale=0.25)
self.sep_emb = nn.Parameter(torch.zeros(1, 1, dec_config.n_embd))# nn.Embedding(1, dec_config.n_embd)
torch.nn.init.normal_(self.sep_emb, std=.02)
self.mask_emb = nn.Parameter(torch.zeros(1, config.model.inp_dim))# nn.Embedding(1, config.model.inp_dim)
torch.nn.init.normal_(self.mask_emb, std=.02)
self.embedding_dense = DenseGeneral(
in_shapes=(dec_config.inp_dim,),
out_features=(1, dec_config.n_embd),
axis=(-1,),
)
        # NOTE: DecoderLayer is not defined in this file; it is assumed to be
        # provided alongside EncoderLayer in the original layers module.
        self.layers = nn.ModuleList(
            [DecoderLayer(config=config) for _ in range(self.num_layers)]
        )
        self.norm = LlamaAdaptiveRMSNorm(
            hidden_size=dec_config.n_embd, dim_cond=dec_config.n_embd
        )
self.dropout = nn.Dropout(config.model.dropout_rate)
self.reconstructor = MlpBlock(
embed_dim=dec_config.n_embd,
intermediate_dim=dec_config.n_hidden,
out_dim = dec_config.inp_dim
)
def get_ids(self, text_input_ids, max_len, pad_value=1):
bs, seq_len = text_input_ids.size()
padding_size = max_len - seq_len
padding_tensor = torch.empty(bs, padding_size, device=text_input_ids.device).fill_(pad_value).long()
return torch.cat((text_input_ids, padding_tensor), dim=1)
def mask_prob(self, t):
return torch.sin(t * np.pi / 2).to(t.device)
def get_t(self, x0) :
t = torch.rand(x0.shape[0], device=x0.device, requires_grad=False)
t = torch.clamp(t, 1e-5, 1.0)
return t
def mask_tgt_embeddings(self, x0):
# x0: semantic tokens (B, T)
t = self.get_t(x0)
new_t = t
mask_prob = self.mask_prob(new_t) # (B,)
# if mask_prob[i] < 0.2, mask_prob[i] = 0.2
mask_prob = torch.where(
mask_prob < 0.2, torch.ones_like(mask_prob) * 0.2, mask_prob
)
        # Draw the Bernoulli mask and replace masked positions with the
        # learned mask embedding.
        target_mask = torch.bernoulli(torch.ones_like(x0[:, :, 0]) * mask_prob[..., None])
        xt = x0.clone()
        xt[(target_mask == 1)] = self.mask_emb.to(xt.dtype)
        return xt, target_mask
def random_masking(self, x, orders):
# generate token mask
bsz, seq_len, embed_dim = x.shape
mask_rate = self.mask_ratio_generator.rvs(1)[0]
num_masked_tokens = int(np.ceil(seq_len * mask_rate))
mask = torch.zeros(bsz, seq_len, device=x.device)
mask = torch.scatter(mask, dim=-1, index=orders[:, :num_masked_tokens].to(x.device),
src=torch.ones(bsz, seq_len, device=x.device))
return mask.long()
def sample_orders(self, bsz, seq_len =32 ):
# generate a batch of random generation orders
orders = []
for _ in range(bsz):
order = np.array(list(range(seq_len)))
np.random.shuffle(order)
orders.append(order)
orders = torch.Tensor(np.array(orders)).long()
return orders
def mask_input(self, x) :
bs, seq_len, _ = x.size()
orders = self.sample_orders(bs,seq_len)
mask = self.random_masking(x, orders)
xt = x.clone()
xt[(mask==1)] = self.mask_emb.to(xt.dtype)
return xt, mask
def forward(
self,
enc_state: EncoderInferenceState,
enc_out: torch.Tensor,
quote_embs: torch.Tensor,
dec_in : torch.Tensor,
text_input_ids: torch.Tensor,
labels:torch.Tensor = None) -> torch.Tensor:
        if self.training:
            x, target_mask = self.mask_input(dec_in)
        else:
            target_mask = None
            x = dec_in
        x = self.embedding_dense(x).squeeze(2)
        x = torch.cat(
            (quote_embs, self.sep_emb.repeat(x.size(0), 1, 1).to(x.dtype), x), dim=1
        )
        dec_in_dummy = self.get_ids(text_input_ids, max_len=x.size(1))
        state = DecoderInferenceState.new(self.config, enc_state, enc_out, dec_in_dummy)
        cross_attentions = ()
        for i, layer in enumerate(self.layers):
            x, cattns = layer(x, state)
            cross_attentions += (cattns,)
        # Final norm, then reconstruct the last 32 latent frames.
        x = self.norm(x)
        reconstructed_input = self.reconstructor(self.dropout(x[:, -32:]))
        if self.training:
            loss1 = F.mse_loss(
                reconstructed_input[(target_mask == 1)],
                dec_in[(target_mask == 1)],
                reduction="mean",
            )
            loss2 = F.l1_loss(
                reconstructed_input[(target_mask == 1)],
                dec_in[(target_mask == 1)],
                reduction="mean",
            )
            mask_loss = loss1 + loss2
        elif labels is not None:
            loss1 = F.mse_loss(reconstructed_input, labels, reduction="mean")
            loss2 = F.l1_loss(reconstructed_input, labels, reduction="mean")
            mask_loss = loss1 + loss2
        else:
            mask_loss = None
        out = QuoteTTSOutput(
            logits=x,
            mask_loss=mask_loss,
            cross_attentions=cross_attentions,
            expressive_latents=reconstructed_input,
            target_mask=target_mask,
        )
return out
class Encoder(nn.Module):
    """Transformer Encoder Stack using DenseGeneral."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        model_config = config.model
        dec_config = config.model.decoder
        data_config = config.data
        self.num_layers = dec_config.n_layer
        self.embedding_dense = nn.Linear(dec_config.inp_dim, dec_config.n_embd, bias=True)
        torch.nn.init.xavier_uniform_(self.embedding_dense.weight)
        torch.nn.init.constant_(self.embedding_dense.bias, 0)
        self.sep_emb = nn.Parameter(torch.zeros(1, 1, dec_config.n_embd))
        torch.nn.init.normal_(self.sep_emb, std=.02)
        self.layers = nn.ModuleList(
            [EncoderLayer(config=config) for _ in range(self.num_layers)]
        )
        self.norm = LlamaAdaptiveRMSNorm(
            hidden_size=dec_config.n_embd, dim_cond=dec_config.n_embd
        )
        self.dropout = nn.Dropout(config.model.dropout_rate)
def get_ids(self, text_input_ids, max_len, pad_value=1):
bs, seq_len = text_input_ids.size()
padding_size = max_len - seq_len
padding_tensor = torch.empty(bs, padding_size, device=text_input_ids.device).fill_(pad_value).long()
return torch.cat((text_input_ids, padding_tensor), dim=1)
def forward(
self,
context_embs: torch.Tensor,
audio_in : torch.Tensor,
ref_in : torch.Tensor,
text_input_ids: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
bsz, seq_len, embed_dim = context_embs.shape
        x = self.embedding_dense(audio_in)
        x = torch.cat(
            (context_embs, self.sep_emb.repeat(bsz, 1, 1).to(x.dtype), x), dim=1
        )
        mask_with_buffer = torch.cat(
            [torch.zeros(bsz, seq_len + 1, device=x.device), mask], dim=1
        )
        # Drop masked positions (MAE-style): keep only unmasked tokens.
        x = x[(1 - mask_with_buffer).nonzero(as_tuple=True)].reshape(bsz, -1, embed_dim)
        # Pad-aware attention state over the kept length.
        enc_in_dummy = self.get_ids(text_input_ids, max_len=x.size(1))
        state = EncoderInferenceState.new(self.config, enc_in_dummy)
        for i, layer in enumerate(self.layers):
            x = layer(x, state)
        x = self.norm(x)
        return x
class MaskedEncoder(nn.Module):
    """Masked Transformer Encoder stack (MAR-style decoder) using DenseGeneral."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        model_config = config.model
        dec_config = config.model.decoder
        data_config = config.data
        self.num_layers = dec_config.n_layer
        self.mask_token = nn.Parameter(torch.zeros(1, 1, dec_config.n_embd))
        torch.nn.init.normal_(self.mask_token, std=.02)
        self.embedding_dense = nn.Linear(dec_config.inp_dim, dec_config.n_embd, bias=True)
        torch.nn.init.xavier_uniform_(self.embedding_dense.weight)
        torch.nn.init.constant_(self.embedding_dense.bias, 0)
        self.layers = nn.ModuleList(
            [EncoderLayer(config=config) for _ in range(self.num_layers)]
        )
        self.norm = LlamaAdaptiveRMSNorm(
            hidden_size=dec_config.n_embd, dim_cond=dec_config.n_embd
        )
def get_ids(self, text_input_ids, max_len, pad_value=1):
bs, seq_len = text_input_ids.size()
padding_size = max_len - seq_len
padding_tensor = torch.empty(bs, padding_size, device=text_input_ids.device).fill_(pad_value).long()
return torch.cat((text_input_ids, padding_tensor), dim=1)
def forward(
self,
audio_in: torch.Tensor,
context_embs: torch.Tensor,
mask: torch.Tensor,
text_input_ids : torch.Tensor,
cond_emb: torch.Tensor) -> torch.Tensor:
        bsz, seq_len = text_input_ids.shape
        x = self.embedding_dense(audio_in)
        mask_with_buffer = torch.cat(
            [torch.zeros(bsz, seq_len, device=x.device), mask], dim=1
        )
        # Prepend the text-context embeddings, then replace masked latent
        # positions with the learned mask token.
        x = torch.cat((context_embs, x), dim=1)
        x[(mask_with_buffer).nonzero(as_tuple=True)] = self.mask_token.to(x.dtype)
        # Pad-aware attention mask from dummy ids over the full length.
        enc_in_dummy = self.get_ids(text_input_ids, max_len=x.size(1))
        state = EncoderInferenceState.new(self.config, enc_in_dummy)
        for i, layer in enumerate(self.layers):
            x = layer(x, state, cond_emb=cond_emb)
        x = self.norm(x, cond_embedding=cond_emb)
        # Return only the 32 latent positions (the audio part of the sequence).
        return x[:, -32:]
class EncoderDecoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.mask_ratio_generator = stats.truncnorm(
            (config.model.mask_ratio_min - 1.0) / 0.25, 0, loc=1.0, scale=0.25
        )
        # Frozen T5 text encoder for the narrative context.
        self.context_encoder = T5EncoderModel.from_pretrained(config.model.base_encoder_path)
        for p in self.context_encoder.parameters():
            p.requires_grad = False
        self.context_encoder = self.context_encoder.eval()
        self.decoder = MaskedEncoder(config)
        # Per-token diffusion head (MAR-style).
        self.diffloss = DiffLoss(
            target_channels=config.model.inp_dim,
            z_channels=1024,
            width=1024,
            depth=8,
            num_sampling_steps='100',
            grad_checkpointing=False,
        )
        # Conditioning on the masking ratio: sinusoidal embedding + MLP.
        self.mask_step_embedding = SinusoidalPosEmb(config.model.decoder.n_embd)
        self.mask_step_mlp = nn.Sequential(
            nn.Linear(config.model.decoder.n_embd, config.model.decoder.n_embd * 4),
            nn.SiLU(),
            nn.Linear(config.model.decoder.n_embd * 4, config.model.decoder.n_embd),
        )
def random_masking(self, x, orders):
# generate token mask
bsz, seq_len, embed_dim = x.shape
mask_rate = self.mask_ratio_generator.rvs(1)[0]
num_masked_tokens = int(np.ceil(seq_len * mask_rate))
mask = torch.zeros(bsz, seq_len, device=x.device)
mask = torch.scatter(mask, dim=-1, index=orders[:, :num_masked_tokens].to(x.device),
src=torch.ones(bsz, seq_len, device=x.device))
return mask.long()
def sample_orders(self, bsz, seq_len =32 ):
# generate a batch of random generation orders
orders = []
for _ in range(bsz):
order = np.array(list(range(seq_len)))
np.random.shuffle(order)
orders.append(order)
orders = torch.Tensor(np.array(orders)).long()
return orders
    def mask_prob(self, t):
        # Sine masking schedule: t in (0, 1] -> masking probability in (0, 1].
        return torch.sin(t * np.pi / 2).to(t.device)
    def get_mask(self, bsz, device, seq_len=32):
        # Sample a masking ratio per example and draw a Bernoulli mask,
        # flooring the ratio at 0.2 so some positions are always masked.
        t = torch.rand(bsz, device=device, requires_grad=False)
        t = torch.clamp(t, 1e-5, 1.0)
        mask_prob = self.mask_prob(t)
        mask_prob = torch.where(
            mask_prob < 0.2, torch.ones_like(mask_prob) * 0.2, mask_prob
        )
        mask = torch.bernoulli(
            torch.ones(bsz, seq_len, device=device) * mask_prob[..., None]
        ).long()
        return t, mask
    def forward_loss(self, z, target, mask, diffusion_batch_mul=4):
        # Flatten (B, T) into B*T token rows and repeat them
        # diffusion_batch_mul times so each token is trained on several
        # diffusion timesteps per step.
        bsz, seq_len, _ = target.shape
        target = target.reshape(bsz * seq_len, -1).repeat(diffusion_batch_mul, 1)
        z = z.reshape(bsz * seq_len, -1).repeat(diffusion_batch_mul, 1)
        mask = mask.reshape(bsz * seq_len).repeat(diffusion_batch_mul)
        loss = self.diffloss(z=z, target=target, mask=mask)
        return loss
    def get_diff_t(self, x):
        # 32 is the latent sequence length here.
        return torch.randint(
            0, self.diffloss.train_diffusion.num_timesteps,
            (x.shape[0] * 32,), device=x.device,
        )
def _forward(
self,
context: torch.Tensor,
quote: torch.Tensor,
dec_in_ref: torch.Tensor,
transformer_in : torch.Tensor,
dec_in_tgt: torch.Tensor,
labels: torch.Tensor = None
) :
        bsz, _ = context.size()
        # Mask-ratio conditioning embedding.
        mask_t, mask = self.get_mask(bsz, device=context.device)
        mask_perc_emb = self.mask_step_embedding(mask_t)
        mask_perc_emb = self.mask_step_mlp(mask_perc_emb)
        # Encode the text context with the frozen T5 encoder.
        enc_state = EncoderInferenceState.new(self.context_encoder.config, context)
        enc_out = self.context_encoder(
            input_ids=context, attention_mask=enc_state.padding_mask
        ).last_hidden_state
        z = self.decoder(
            transformer_in,
            enc_out,
            mask,
            context,
            cond_emb=mask_perc_emb,
        )
        gt_latents = transformer_in.clone().detach()
        loss = self.forward_loss(z, gt_latents, mask)
        out = QuoteTTSOutput(
            logits=z,
            loss=loss,
            expressive_latents=None,
        )
        return out
def forward(
self,
context: torch.Tensor,
quote: torch.Tensor,
dec_in_ref: torch.Tensor,
transformer_in: torch.Tensor = None,
dec_in_tgt: torch.Tensor = None,
labels: torch.Tensor = None):
        if self.training:
            return self._forward(
                context=context,
                quote=quote,
                dec_in_ref=dec_in_ref,
                dec_in_tgt=dec_in_tgt,
                transformer_in=transformer_in,
                labels=labels,
            )
        else:
            # Evaluation: iteratively sample latents, then score them with the
            # diffusion loss against the ground-truth latents.
            samples, z = self.sample_tokens(context, quote, dec_in_ref, num_iter=1)
            mask = torch.ones(z.size(0), z.size(1), device=context.device).long()
            loss = self.forward_loss(z, transformer_in, mask, diffusion_batch_mul=1)
            out = QuoteTTSOutput(
                logits=samples,
                loss=loss,
                labels=transformer_in,
            )
            return out
    @classmethod
    def from_pretrained(cls, path: str, config_path: str = None):
        if config_path:
            with open(config_path) as f:
                # NOTE: yaml.safe_load returns a plain dict; it is assumed the
                # Config type accepts or wraps it upstream.
                config = yaml.safe_load(f)
        else:
            config = Config()
        model = cls(config)
        model.load_state_dict(
            torch.load(os.path.join(path, "pytorch_model.bin"), map_location="cpu")
        )
        return model
    @torch.no_grad()
    def sample_tokens(self, context, quote, dec_in_ref, num_iter=10, seq_len=32, temperature=1.0):
        bsz = context.size(0)
        enc_state = EncoderInferenceState.new(self.context_encoder.config, context)
        enc_out = self.context_encoder(
            input_ids=context, attention_mask=enc_state.padding_mask
        ).last_hidden_state
        # Start fully masked and sample per-example generation orders.
        tokens = torch.zeros(bsz, seq_len, dec_in_ref.size(2), device=context.device)
        mask = torch.ones(bsz, seq_len, device=context.device)
        orders = self.sample_orders(bsz)
        h = 1.0 / num_iter
        t_list = [1.0 - i * h for i in range(num_iter)]
        t_list.append(0.0)
        # Iteratively unmask latents, MaskGIT/MAGE-style.
        for step in range(num_iter):
            cur_tokens = tokens.clone()
            t = t_list[step] * torch.ones(bsz).to(mask.device)
            mask_perc_emb = self.mask_step_embedding(t)
            mask_perc_emb = self.mask_step_mlp(mask_perc_emb)
            z = self.decoder(cur_tokens, enc_out, mask, context, cond_emb=mask_perc_emb)
            # Mask ratio for the next round, following MaskGIT and MAGE.
            mask_ratio = torch.Tensor([t_list[step + 1]]).to(context.device)
            mask_len = (self.mask_prob(mask_ratio) * seq_len).long()
            # Keep at least one token masked for the next iteration.
            mask_len = torch.maximum(
                torch.Tensor([1]).to(context.device),
                torch.minimum(torch.sum(mask, dim=-1, keepdims=True) - 1, mask_len),
            )
            # Masking for the next iteration; locations predicted in this one.
            mask_next = mask_by_order(mask_len[0], orders, bsz, seq_len).to(cur_tokens.device)
            if step >= num_iter - 1:
                mask_to_pred = mask[:bsz].bool()
            else:
                mask_to_pred = torch.logical_xor(mask[:bsz].bool(), mask_next.bool())
            mask = mask_next
            full_z = z.clone()
            z = z[mask_to_pred.nonzero(as_tuple=True)]
            # Sample token latents for this step via the diffusion head.
            sampled_token_latent = self.diffloss.sample(z, temperature, cfg=1.0)
            cur_tokens[mask_to_pred.nonzero(as_tuple=True)] = sampled_token_latent
            tokens = cur_tokens.clone()
        return tokens, full_z
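# Illustrative look at the sine unmasking schedule used in sample_tokens
# (a standalone sketch, no model state involved): with num_iter steps, the
# number of still-masked latents decays as sin(pi * t / 2) for t = 1 -> 0.
def _demo_mask_schedule(num_iter: int = 5, seq_len: int = 32):
    h = 1.0 / num_iter
    t_list = [1.0 - i * h for i in range(num_iter)] + [0.0]
    masked = [int(math.sin(math.pi * t / 2) * seq_len) for t in t_list]
    # e.g. num_iter=5 -> [32, 30, 25, 18, 9, 0] positions still masked
    return list(zip(t_list, masked))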
def mask_by_order(mask_len, order, bsz, seq_len):
masking = torch.zeros(bsz, seq_len)
masking = torch.scatter(masking, dim=-1, index=order[:, :mask_len.long()], src=torch.ones(bsz, seq_len)).bool()
return masking
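# Small sanity check (illustrative): mask_by_order turns per-example random
# orders into boolean masks covering exactly the first mask_len positions of
# each order.
def _demo_mask_by_order():
    bsz, seq_len = 2, 8
    orders = torch.argsort(torch.rand(bsz, seq_len), dim=-1)
    masking = mask_by_order(torch.tensor([3.0]), orders, bsz, seq_len)
    assert masking.shape == (bsz, seq_len)
    assert (masking.sum(-1) == 3).all()
    return masking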
def top_p_sample(logits, thres=0.9):
    # Despite the name, this is a top-k filter: it keeps the
    # ceil((1 - thres) * vocab) highest logits and sets the rest to -inf.
    # Assumes 3-D logits (B, T, V) because of the scatter_ on dim 2.
    k = math.ceil((1 - thres) * logits.shape[-1])
    val, ind = logits.topk(k, dim=-1)
    probs = torch.full_like(logits, float("-inf"))
    probs.scatter_(2, ind, val)
    return probs
def log(t, eps=1e-10):
return torch.log(t + eps)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature=1.0, dim=-1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim=dim)
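# Illustrative use of the Gumbel-max trick above: adding Gumbel noise to the
# (temperature-scaled) logits and taking the argmax draws a sample from the
# corresponding softmax distribution.
def _demo_gumbel_sample():
    logits = torch.tensor([[2.0, 0.5, 0.1]]).log_softmax(dim=-1)
    idx = gumbel_sample(logits, temperature=1.0)
    assert idx.shape == (1,)
    return idx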
def apply_top_k_only(
logits: torch.Tensor,
k: torch.Tensor,
) -> torch.Tensor:
"""
Apply top-k mask to the logits.
This implementation doesn't involve sorting the entire vocab.
The logits tensor may be updated in-place.
"""
    no_top_k_mask = k == logits.shape[1]
    # Set non-top-k rows to 1 so that we can gather.
    k = k.masked_fill(no_top_k_mask, 1)
    max_top_k = k.max()
    # topk.values tensor has shape [batch_size, max_top_k].
    # Convert top k to 0-based index in range [0, max_top_k).
    # NOTE: sub_ mutates k in place; callers should not reuse that tensor.
    k_index = k.sub_(1).unsqueeze(1)
    top_k_mask = logits.topk(max_top_k, dim=1).values.gather(1, k_index.long())
# Handle non-topk rows.
top_k_mask.masked_fill_(no_top_k_mask.unsqueeze(1), -float("inf"))
logits.masked_fill_(logits < top_k_mask, -float("inf"))
return logits
def apply_top_k_top_p(
logits: torch.Tensor,
k: Optional[torch.Tensor],
p: Optional[torch.Tensor],
) -> torch.Tensor:
"""Apply top-k and top-p masks to the logits.
If a top-p is used, this function will sort the logits tensor,
which can be slow for large batches.
The logits tensor may be updated in-place.
"""
if p is None:
if k is None:
return logits
# Avoid sorting vocab for top-k only case.
return apply_top_k_only(logits, k)
logits_sort, logits_idx = logits.sort(dim=-1, descending=False)
if k is not None:
# Apply top-k.
top_k_mask = logits_sort.size(1) - k.to(torch.long) # shape: B
# Get all the top_k values.
top_k_mask = logits_sort.gather(1, top_k_mask.unsqueeze(dim=1))
top_k_mask = logits_sort < top_k_mask
logits_sort.masked_fill_(top_k_mask, -float("inf"))
if p is not None:
# Apply top-p.
probs_sort = logits_sort.softmax(dim=-1)
probs_sum = torch.cumsum(probs_sort, dim=-1, out=probs_sort)
top_p_mask = probs_sum <= 1 - p.unsqueeze(dim=1)
# at least one
top_p_mask[:, -1] = False
logits_sort.masked_fill_(top_p_mask, -float("inf"))
# Re-sort the probabilities.
logits = logits_sort.scatter(dim=-1, index=logits_idx, src=logits_sort)
return logits
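# Quick illustration (assumption: a single-row batch) of the combined top-k /
# top-p filter: only the k highest logits that also fall inside the nucleus-p
# mass survive; everything else becomes -inf.
def _demo_top_k_top_p():
    logits = torch.tensor([[2.0, 1.0, 0.5, 0.1, -1.0]])
    out = apply_top_k_top_p(
        logits.clone(), k=torch.tensor([3]), p=torch.tensor([0.9])
    )
    assert torch.isfinite(out).sum() <= 3
    return out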
def _sample_next_token(
logits_BCxV: torch.Tensor,
temperature: float,
top_p: float,
top_k: int
):
if temperature in [0, None]:
return torch.argmax(logits_BCxV, dim=-1)
logits_BCxV = logits_BCxV / temperature
logits = apply_top_k_top_p(logits_BCxV, torch.tensor([top_k]), torch.tensor([top_p]))
final_probs_BCxV = torch.softmax(logits, dim=-1)
sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)
sampled_indices_C = sampled_indices_BC.squeeze(-1)
return sampled_indices_C
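# Usage sketch for the sampler above: greedy argmax when temperature is 0 or
# None, otherwise temperature-scaled top-k/top-p sampling over the last dim.
def _demo_sample_next_token():
    logits = torch.randn(1, 50)
    greedy = _sample_next_token(logits.clone(), temperature=0, top_p=0.9, top_k=10)
    sampled = _sample_next_token(logits.clone(), temperature=0.8, top_p=0.9, top_k=10)
    assert greedy.shape == (1,) and sampled.shape == (1,)
    return greedy, sampled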
# def _sample_next_token(
# logits_BCxV: torch.Tensor,
# temperature: float,
# top_p: float,
# top_k: int,
# audio_eos_value: int,
# ) -> torch.Tensor:
# if temperature == 0.0:
# return torch.argmax(logits_BCxV, dim=-1)
# logits_BCxV = logits_BCxV / temperature
# if audio_eos_value is not None and audio_eos_value >= 0:
# top_logit_indices_BC = torch.argmax(logits_BCxV, dim=-1)
# eos_not_highest_mask_BC = top_logit_indices_BC != audio_eos_value
# mask_eos_unless_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)
# mask_eos_unless_highest_BCxV[eos_not_highest_mask_BC, audio_eos_value] = True
# logits_BCxV = logits_BCxV.masked_fill(mask_eos_unless_highest_BCxV, -torch.inf)
# if top_k is not None:
# _, top_k_indices_BCxV = torch.topk(logits_BCxV, k=top_k, dim=-1)
# mask = torch.ones_like(logits_BCxV, dtype=torch.bool)
# mask = mask.scatter(dim=-1, index=top_k_indices_BCxV, value=False)
# logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)
# if top_p < 1.0:
# probs_BCxV = torch.softmax(logits_BCxV, dim=-1)
# sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)
# cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)
# sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p
# sorted_indices_to_remove_BCxV = torch.roll(sorted_indices_to_remove_BCxV, shifts=1, dims=-1)
# sorted_indices_to_remove_BCxV[..., 0] = torch.zeros_like(sorted_indices_to_remove_BCxV[..., 0])
# indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)
# indices_to_remove_BCxV = indices_to_remove_BCxV.scatter(
# dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV
# )
# logits_BCxV = logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)
# final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)
# sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)
# sampled_indices_C = sampled_indices_BC.squeeze(-1)
# return sampled_indices_C
# @torch.no_grad()
# def generate(
# self,
# enc_in: torch.Tensor,
# temperature: float,
# top_p: float,
# top_k: int,
# output_attentions=False) :
# enc_in_uncond = torch.zeros_like(enc_in)
# enc_in = torch.cat((enc_in, enc_in_uncond), dim=0) # [B, T]
# enc_state = EncoderInferenceState.new(self.config, enc_in)
# enc_out = self.encoder(enc_in, enc_state)
# dec_in = torch.tensor([4096], device=enc_in.device).long().unsqueeze(0)
# dec_in_uncond = torch.tensor([4096], device=enc_in.device).long().unsqueeze(0)
# dec_in = torch.cat((dec_in, dec_in_uncond), dim=0)
# dec_state = DecoderInferenceState.new(
# self.config, enc_state, enc_out, dec_in
# )
# dec_state.cross_attn_mask = dec_state.cross_attn_mask[:,:,[0], :]
# # Masking CA for CFG
# dec_state.cross_attn_mask[-1,:,:, :] = False
# cnt = 0
# cross_attns = ()
# all_logits = ()
# while cnt < 34 :
# dec_state.prepare_step(0, cnt+1)
# # Masking CA for CFG
# dec_state.cross_attn_mask[-1,:,:, :] = False
# out = self.decoder(dec_in, dec_state)
# cross_attns += (out.cross_attentions, )
# logits = out['logits']
# # print(logits.size())
# # print(logits[0])
# # print(logits[1])
# logits = logits[0] + self.config.model.cfg_val * (logits[0] - logits[1])
# # logits = logits[0]
# # print(logits.size())
# all_logits += (logits,)
# ntp = _sample_next_token(
# logits.squeeze(1)[-1],
# temperature=temperature,
# top_k=top_k,
# top_p=top_p,
# audio_eos_value=4097)
# # print(dec_in.size(), ntp.size(), ntp)
# # dec_in = torch.cat((dec_in, ntp.unsqueeze(0).view(-1,1)), dim=1)
# dec_in = torch.cat((dec_in, torch.stack((ntp.unsqueeze(0), ntp.unsqueeze(0)))), dim=1)
# if ntp.item() == 4097 :
# break
# cnt += 1
# return dec_in, all_logits, cross_attns