import math
import pickle
import random

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from huggingface_hub import PyTorchModelHubMixin
from transformers import BartConfig, BartModel


class Embeddings(nn.Module):
    """Token embedding scaled by sqrt(d_model), as in the standard Transformer."""

    def __init__(self, n_token, d_model):
        super().__init__()
        self.lut = nn.Embedding(n_token, d_model)
        self.d_model = d_model

    def forward(self, x):
        return self.lut(x) * math.sqrt(self.d_model)


class PianoBart(nn.Module):
    """BART backbone over Octuple-encoded MIDI: each event is a tuple of eight
    attribute tokens that are embedded separately, concatenated, and projected
    to the model dimension before entering the encoder/decoder."""

    def __init__(self, bartConfig, e2w, w2e):
        super().__init__()

        self.bart = BartModel(bartConfig)
        self.hidden_size = bartConfig.d_model
        self.bartConfig = bartConfig

        # Vocabulary sizes for the eight Octuple attributes.
        self.n_tokens = []
        self.classes = ['Bar', 'Position', 'Instrument', 'Pitch', 'Duration', 'Velocity', 'TimeSig', 'Tempo']
        for key in self.classes:
            self.n_tokens.append(len(e2w[key]))
        self.emb_sizes = [256] * 8
        self.e2w = e2w
        self.w2e = w2e

        # Special-token ids for each attribute.
        self.bar_pad_word = self.e2w['Bar']['Bar <PAD>']
        self.mask_word_np = np.array([self.e2w[etype]['%s <MASK>' % etype] for etype in self.classes], dtype=np.int64)
        self.pad_word_np = np.array([self.e2w[etype]['%s <PAD>' % etype] for etype in self.classes], dtype=np.int64)
        self.sos_word_np = np.array([self.e2w[etype]['%s <SOS>' % etype] for etype in self.classes], dtype=np.int64)
        self.eos_word_np = np.array([self.e2w[etype]['%s <EOS>' % etype] for etype in self.classes], dtype=np.int64)

        # One embedding table per attribute.
        self.word_emb = []
        for i, key in enumerate(self.classes):
            self.word_emb.append(Embeddings(self.n_tokens[i], self.emb_sizes[i]))
        self.word_emb = nn.ModuleList(self.word_emb)

        # Project the concatenated attribute embeddings down to d_model.
        self.encoder_linear = nn.Linear(np.sum(self.emb_sizes), bartConfig.d_model)
        self.decoder_linear = self.encoder_linear
        self.decoder_emb = None  # optional replacement embedding for the decoder side

    def forward(self, input_ids_encoder, input_ids_decoder=None, encoder_attention_mask=None,
                decoder_attention_mask=None, output_hidden_states=True, generate=False):
        # Embed each of the eight attribute streams and concatenate them.
        encoder_embs = []
        decoder_embs = []
        for i, key in enumerate(self.classes):
            encoder_embs.append(self.word_emb[i](input_ids_encoder[..., i]))
            if self.decoder_emb is None and input_ids_decoder is not None:
                decoder_embs.append(self.word_emb[i](input_ids_decoder[..., i]))
        if self.decoder_emb is not None and input_ids_decoder is not None:
            decoder_embs.append(self.decoder_emb(input_ids_decoder))

        encoder_embs = torch.cat(encoder_embs, dim=-1)
        emb_linear_encoder = self.encoder_linear(encoder_embs)
        if input_ids_decoder is not None:
            decoder_embs = torch.cat(decoder_embs, dim=-1)
            emb_linear_decoder = self.decoder_linear(decoder_embs)

        if input_ids_decoder is not None:
            y = self.bart(inputs_embeds=emb_linear_encoder,
                          decoder_inputs_embeds=emb_linear_decoder,
                          attention_mask=encoder_attention_mask,
                          decoder_attention_mask=decoder_attention_mask,
                          output_hidden_states=output_hidden_states)
        else:
            # Encoder-only pass when no decoder inputs are given.
            y = self.bart.encoder(inputs_embeds=emb_linear_encoder,
                                  attention_mask=encoder_attention_mask)
        return y

    def get_rand_tok(self):
        # Draw one random token id per attribute.
        rand = [0] * 8
        for i in range(8):
            rand[i] = random.choice(range(self.n_tokens[i]))
        return np.array(rand)

    def change_decoder_embedding(self, new_embedding, new_linear=None):
        # Replace the decoder-side embedding (and optionally its projection).
        self.decoder_emb = new_embedding
        if new_linear is not None:
            self.decoder_linear = new_linear


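# Illustrative construction sketch (not part of the original module). PianoBart
# expects an Octuple-style vocabulary in which e2w[etype] maps token strings such
# as "Bar <PAD>" to ids for each of the eight attributes; the config values below
# are placeholders chosen for brevity, not the training configuration.
#
#   config = BartConfig(max_position_embeddings=1024, d_model=1024,
#                       encoder_layers=8, decoder_layers=8,
#                       encoder_attention_heads=8, decoder_attention_heads=8,
#                       encoder_ffn_dim=2048, decoder_ffn_dim=2048)
#   model = PianoBart(config, e2w, w2e)  # e2w/w2e loaded from an Octuple vocabulary
#   # input_ids_encoder / input_ids_decoder: (batch, seq_len, 8) LongTensors
#   out = model(input_ids_encoder, input_ids_decoder,
#               encoder_attention_mask, decoder_attention_mask)

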
class PianoBartLM(nn.Module):
    """PianoBart with per-attribute language-modelling heads on top."""

    def __init__(self, pianobart: PianoBart):
        super().__init__()
        self.pianobart = pianobart
        self.mask_lm = MLM(self.pianobart.e2w, self.pianobart.n_tokens, self.pianobart.hidden_size)

    def forward(self, input_ids_encoder, input_ids_decoder=None, encoder_attention_mask=None,
                decoder_attention_mask=None, generate=False, device_num=-1):
        if not generate:
            x = self.pianobart(input_ids_encoder, input_ids_decoder, encoder_attention_mask, decoder_attention_mask)
            return self.mask_lm(x)

        # Autoregressive generation; only batch size 1 is supported.
        if input_ids_encoder.shape[0] != 1:
            print("ERROR: generation requires a batch size of 1")
            exit(-1)
        if device_num == -1:
            device = torch.device('cpu')
        else:
            device = torch.device('cuda:' + str(device_num))

        pad = torch.from_numpy(self.pianobart.pad_word_np)
        # Start from an all-<PAD> decoder input and reveal one step at a time.
        input_ids_decoder = pad.repeat(input_ids_encoder.shape[0], input_ids_encoder.shape[1], 1).to(device)
        result = pad.repeat(input_ids_encoder.shape[0], input_ids_encoder.shape[1], 1).to(device)
        decoder_attention_mask = torch.zeros_like(encoder_attention_mask).to(device)
        input_ids_decoder[:, 0, :] = torch.tensor(self.pianobart.sos_word_np)
        decoder_attention_mask[:, 0] = 1

        for i in range(input_ids_encoder.shape[1]):
            x = self.mask_lm(self.pianobart(input_ids_encoder, input_ids_decoder,
                                            encoder_attention_mask, decoder_attention_mask))
            current_output = self.sample(x, i)
            if i != input_ids_encoder.shape[1] - 1:
                # Feed the sampled step back in as the next decoder input.
                input_ids_decoder[:, i + 1, :] = current_output
                decoder_attention_mask[:, i + 1] += 1
            if (current_output >= pad).any():
                # Stop once any attribute samples a special token (id >= its <PAD> id).
                break
            result[:, i, :] = current_output
        return result

    def sample(self, x, index):
        # Per-attribute temperature and nucleus thresholds
        # (order: Bar, Position, Instrument, Pitch, Duration, Velocity, TimeSig, Tempo).
        t = [1.2, 1.2, 5, 1, 2, 5, 5, 1.2]
        p = [1, 1, 1, 0.9, 0.9, 1, 1, 0.9]
        result = []
        for j, etype in enumerate(self.pianobart.e2w):
            y = x[j]
            y = y[:, index, :]
            y = sampling(y, p[j], t[j])
            result.append(y)
        return torch.tensor(result)


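# Generation usage sketch (illustrative; assumes batch size 1 as required by the
# generate branch above, and that `pianobart` is an already constructed PianoBart):
#
#   lm = PianoBartLM(pianobart)
#   # enc_ids: (1, seq_len, 8) condition sequence, enc_mask: (1, seq_len)
#   with torch.no_grad():
#       generated = lm(input_ids_encoder=enc_ids,
#                      encoder_attention_mask=enc_mask,
#                      generate=True, device_num=-1)  # -1 selects CPU
#   # `generated` has shape (1, seq_len, 8); positions after the first sampled
#   # special token remain <PAD>.

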
def nucleus(probs, p):
    """Nucleus (top-p) sampling over a 1-D numpy probability vector."""
    probs /= (sum(probs) + 1e-5)
    sorted_probs = np.sort(probs)[::-1]
    sorted_index = np.argsort(probs)[::-1]
    cusum_sorted_probs = np.cumsum(sorted_probs)
    after_threshold = cusum_sorted_probs > p
    if sum(after_threshold) > 0:
        # Keep the smallest prefix whose cumulative probability exceeds p.
        last_index = np.where(after_threshold)[0][0] + 1
        candi_index = sorted_index[:last_index]
    else:
        candi_index = sorted_index[0:1]
    candi_probs = [probs[i] for i in candi_index]
    candi_probs /= sum(candi_probs)
    word = np.random.choice(candi_index, size=1, p=candi_probs)[0]
    return word


def sampling(logit, p=None, t=1.0):
    """Temperature-scaled softmax followed by nucleus sampling."""
    logit = logit.squeeze()
    probs = torch.softmax(logit / t, dim=-1)
    probs = probs.cpu().detach().numpy()
    cur_word = nucleus(probs, p=p)
    return cur_word


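# Toy example (illustrative only): with p=0.9 the candidate set is the smallest
# prefix of the sorted distribution whose cumulative mass exceeds p, and the
# token is drawn from the renormalized candidates.
#
#   logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
#   token_id = sampling(logits, p=0.9, t=1.0)  # returns an index into `logits`

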
class MLM(nn.Module):
    """Per-attribute projection heads mapping hidden states to token logits."""

    def __init__(self, e2w, n_tokens, hidden_size):
        super().__init__()
        self.proj = []
        for i, etype in enumerate(e2w):
            self.proj.append(nn.Linear(hidden_size, n_tokens[i]))
        self.proj = nn.ModuleList(self.proj)
        self.e2w = e2w

    def forward(self, y):
        y = y.last_hidden_state
        # One logits tensor per Octuple attribute.
        ys = []
        for i, etype in enumerate(self.e2w):
            ys.append(self.proj[i](y))
        return ys


class SelfAttention(nn.Module):
    def __init__(self, input_dim, da, r):
        '''
        Args:
            input_dim (int): feature dimension of the input (batch, seq, input_dim)
            da (int): number of hidden units in the self-attention MLP
            r (int): number of attention aspects to extract
        '''
        super(SelfAttention, self).__init__()
        self.ws1 = nn.Linear(input_dim, da, bias=False)
        self.ws2 = nn.Linear(da, r, bias=False)

    def forward(self, h):
        # Attention weights over the sequence, one distribution per aspect.
        attn_mat = F.softmax(self.ws2(torch.tanh(self.ws1(h))), dim=1)
        attn_mat = attn_mat.permute(0, 2, 1)
        return attn_mat


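# Shape sketch for the attention pooling used by SequenceClassification below
# (illustrative): for h of shape (batch, seq, hs) the module returns a
# (batch, r, seq) weight matrix, and torch.bmm(attn_mat, h) yields r pooled
# views of shape (batch, r, hs) that are flattened into a (batch, r * hs) vector.
#
#   attn = SelfAttention(input_dim=1024, da=128, r=4)
#   h = torch.randn(2, 16, 1024)
#   m = torch.bmm(attn(h), h)  # (2, 4, 1024)

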
class SequenceClassification(nn.Module):
    """Sequence-level classifier: attention-pooled PianoBart features fed to an MLP."""

    def __init__(self, pianobart, class_num, hs, da=128, r=4):
        super().__init__()
        self.pianobart = pianobart
        self.attention = SelfAttention(hs, da, r)
        self.classifier = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(hs * r, 256),
            nn.ReLU(),
            nn.Linear(256, class_num)
        )

    def forward(self, input_ids_encoder, encoder_attention_mask=None):
        # The encoder input is reused as the decoder input for classification.
        x = self.pianobart(input_ids_encoder=input_ids_encoder,
                           input_ids_decoder=input_ids_encoder,
                           encoder_attention_mask=encoder_attention_mask,
                           decoder_attention_mask=encoder_attention_mask)
        x = x.last_hidden_state
        attn_mat = self.attention(x)
        m = torch.bmm(attn_mat, x)
        flatten = m.view(m.size()[0], -1)
        res = self.classifier(flatten)
        return res


class TokenClassification(nn.Module):
    """Token-level classifier on top of PianoBart decoder states."""

    def __init__(self, pianobart, class_num, hs):
        super().__init__()
        self.pianobart = pianobart
        self.classifier = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(hs, 256),
            nn.ReLU(),
            nn.Linear(256, class_num)
        )

    def forward(self, input_ids_encoder, input_ids_decoder, encoder_attention_mask=None, decoder_attention_mask=None):
        x = self.pianobart(input_ids_encoder, input_ids_decoder, encoder_attention_mask, decoder_attention_mask)
        x = x.last_hidden_state
        res = self.classifier(x)
        return res


class PianoBART(nn.Module, PyTorchModelHubMixin):
    """Hub-ready wrapper that builds a PianoBart model from an Octuple vocabulary."""

    def __init__(self, max_position_embeddings=1024, hidden_size=1024, layers=8, heads=8, ffn_dims=2048):
        super().__init__()
        with open("./Octuple.pkl", 'rb') as f:
            self.e2w, self.w2e = pickle.load(f)
        self.config = BartConfig(max_position_embeddings=max_position_embeddings,
                                 d_model=hidden_size,
                                 encoder_layers=layers,
                                 encoder_ffn_dim=ffn_dims,
                                 encoder_attention_heads=heads,
                                 decoder_layers=layers,
                                 decoder_ffn_dim=ffn_dims,
                                 decoder_attention_heads=heads)
        self.model = PianoBart(bartConfig=self.config, e2w=self.e2w, w2e=self.w2e)

    def forward(self, input_ids_encoder, input_ids_decoder=None, encoder_attention_mask=None,
                decoder_attention_mask=None, output_hidden_states=True, generate=False):
        return self.model(input_ids_encoder, input_ids_decoder, encoder_attention_mask,
                          decoder_attention_mask, output_hidden_states, generate=generate)


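# Hub-wrapper usage sketch (illustrative): PyTorchModelHubMixin supplies
# save_pretrained / from_pretrained / push_to_hub for this wrapper. Constructing
# PianoBART requires the Octuple vocabulary file ./Octuple.pkl in the working
# directory; the checkpoint name below is a placeholder.
#
#   model = PianoBART(hidden_size=1024, layers=8, heads=8, ffn_dims=2048)
#   model.save_pretrained("pianobart-checkpoint")
#   reloaded = PianoBART.from_pretrained("pianobart-checkpoint")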