aoxo committed (verified)
Commit 2d3a567 · Parent: e5c4a9b

Upload eduport_tts_mal.py with huggingface_hub

Files changed (1):
  1. eduport_tts_mal.py (+22, -37)
eduport_tts_mal.py CHANGED
@@ -14,8 +14,7 @@ def compute_max_audio_length(audio_files, resampler, target_sampling_rate):
         waveform, sample_rate = torchaudio.load(audio_path)
         if sample_rate != target_sampling_rate:
             waveform = resampler(waveform)
-        # max_length = max(max_length, waveform.size(1))  # Max length based on time dimension
-        max_length = 1176240
+        max_length = max(max_length, waveform.size(1))  # Max length based on time dimension
     return max_length
 
 class SpeechDataset(Dataset):
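For context, this hunk replaces the hardcoded max_length = 1176240 with a value computed from the audio itself. A minimal sketch of how the updated helper plausibly reads in full (the loop over audio_files sits outside the hunk, so its exact form is an assumption):

    import torchaudio

    def compute_max_audio_length(audio_files, resampler, target_sampling_rate):
        max_length = 0
        for audio_path in audio_files:
            waveform, sample_rate = torchaudio.load(audio_path)
            if sample_rate != target_sampling_rate:
                waveform = resampler(waveform)
            # Track the longest waveform along the time dimension
            max_length = max(max_length, waveform.size(1))
        return max_length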
@@ -35,45 +34,47 @@ class SpeechDataset(Dataset):
     def __getitem__(self, idx):
         audio_path = self.audio_files[idx]
         transcript_path = self.transcript_files[idx]
-
+
         # Load and process the audio
         waveform, sample_rate = torchaudio.load(audio_path)
-
+
         # If the audio sample rate is not 16kHz, resample it
         if sample_rate != self.target_sampling_rate:
             waveform = self.resampler(waveform)
-
+
         # Pass the waveform to the Wav2Vec2 processor
         input_values = self.processor(waveform, sampling_rate=self.target_sampling_rate, return_tensors="pt").input_values.squeeze(0)
-
+
         # Pad or truncate the audio to ensure fixed length (the longest audio length)
         if input_values.size(0) < self.max_audio_length:
             padding_length = self.max_audio_length - input_values.size(0)
-            # Pad along the time dimension (dim=1)
-            input_values = torch.cat([input_values, torch.zeros(1, padding_length)], dim=1)
+            # Create a zero tensor with the same batch size (1) and the padding length along dimension 1
+            padding = torch.zeros(1, padding_length)
+            input_values = torch.cat([input_values, padding], dim=1)
         else:
             input_values = input_values[:, :self.max_audio_length]  # Truncate to max_audio_length
 
+
         # Load and process the transcript
         with open(transcript_path, 'r') as file:
             transcript = file.read().strip()
-
+
         # Encode the transcript using the GPT2 tokenizer
         input_ids = self.tokenizer.encode(transcript, truncation=True, padding='longest', max_length=self.max_length, return_tensors="pt").squeeze(0)
 
         return input_values, input_ids
-
+
 def collate_fn(batch):
     audio_inputs, text_inputs = zip(*batch)
-
+
     # Pad audio inputs to the maximum length in the batch
     max_audio_len = max([audio.size(1) for audio in audio_inputs])
     audio_inputs_padded = torch.stack([torch.cat([audio, torch.zeros(1, max_audio_len - audio.size(1))], dim=1) if audio.size(1) < max_audio_len else audio[:, :max_audio_len] for audio in audio_inputs])
-
+
     # Pad text inputs to the longest transcript length
     max_text_len = max([text.size(0) for text in text_inputs])
     text_inputs_padded = torch.stack([torch.cat([text, torch.tensor([0] * (max_text_len - text.size(0)))], dim=0) if text.size(0) < max_text_len else text[:max_text_len] for text in text_inputs])
-
+
     return audio_inputs_padded, text_inputs_padded
 
 # Tokenizer and processor
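The padding change only splits the zero tensor into its own variable; the behaviour is the same. A standalone sketch of the pad-or-truncate step on a (1, T) waveform tensor (pad_or_truncate and the literal max length are illustrative, not names from the script):

    import torch

    def pad_or_truncate(input_values: torch.Tensor, max_audio_length: int) -> torch.Tensor:
        # input_values has shape (1, T): pad with zeros or cut along the time axis (dim=1)
        if input_values.size(1) < max_audio_length:
            padding = torch.zeros(1, max_audio_length - input_values.size(1))
            input_values = torch.cat([input_values, padding], dim=1)
        else:
            input_values = input_values[:, :max_audio_length]
        return input_values

    print(pad_or_truncate(torch.ones(1, 3), 5))  # tensor([[1., 1., 1., 0., 0.]])

Note that __getitem__ as committed compares input_values.size(0) to max_audio_length while padding and slicing along dim=1; the sketch uses size(1) throughout so the condition and the concatenation refer to the same axis.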
@@ -108,11 +109,8 @@ val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, collate_fn=col
 
 # Model Architecture
 encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
-# Modify the decoder configuration
-decoder_config = GPT2Config(
-    vocab_size=len(tokenizer),
-    add_cross_attention=True  # Add this line to enable cross-attention
-)
+decoder_config = GPT2Config(vocab_size=len(tokenizer))
+decoder_config.add_cross_attention=True
 decoder = GPT2LMHeadModel(config=decoder_config)
 
 class SpeechRecognitionModel(torch.nn.Module):
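Both versions enable cross-attention on the GPT-2 decoder; the new code just sets the flag after construction instead of passing it to the constructor. A small sketch showing the two forms are equivalent (the vocabulary size is illustrative):

    from transformers import GPT2Config, GPT2LMHeadModel

    cfg_old = GPT2Config(vocab_size=50257, add_cross_attention=True)  # constructor kwarg
    cfg_new = GPT2Config(vocab_size=50257)                            # attribute set afterwards
    cfg_new.add_cross_attention = True

    assert cfg_old.add_cross_attention == cfg_new.add_cross_attention
    decoder = GPT2LMHeadModel(config=cfg_new)  # decoder blocks now carry cross-attention layers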
@@ -122,24 +120,9 @@ class SpeechRecognitionModel(torch.nn.Module):
         self.decoder = decoder
 
     def forward(self, audio_input, text_input):
-        # Extract encoder hidden states
         encoder_output = self.encoder(audio_input).last_hidden_state
-
-        # Create an attention mask for the encoder output
-        encoder_attention_mask = torch.ones(
-            encoder_output.shape[:2],
-            dtype=torch.long,
-            device=encoder_output.device
-        )
-
-        # Forward pass through the decoder with cross-attention
-        outputs = self.decoder(
-            input_ids=text_input,
-            encoder_hidden_states=encoder_output,
-            encoder_attention_mask=encoder_attention_mask
-        )
-
-        return outputs
+        decoder_output = self.decoder(text_input, encoder_hidden_states=encoder_output)
+        return decoder_output
 
 # Instantiate the model
 model = SpeechRecognitionModel(encoder, decoder)
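The rewritten forward drops the explicit all-ones encoder_attention_mask; when GPT-2 receives encoder_hidden_states without a mask it attends over every encoder position, so the behaviour should be unchanged. A minimal, self-contained sketch of the resulting module (class and argument names follow the diff; the checkpoints are the ones loaded above):

    import torch
    from transformers import Wav2Vec2Model, GPT2Config, GPT2LMHeadModel

    class SpeechRecognitionModel(torch.nn.Module):
        def __init__(self, encoder, decoder):
            super().__init__()
            self.encoder = encoder
            self.decoder = decoder

        def forward(self, audio_input, text_input):
            # (batch, samples) waveform -> (batch, frames, hidden) acoustic features
            encoder_output = self.encoder(audio_input).last_hidden_state
            # Cross-attend over the acoustic features while decoding the token ids
            return self.decoder(text_input, encoder_hidden_states=encoder_output)

    encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
    decoder = GPT2LMHeadModel(GPT2Config(add_cross_attention=True))
    model = SpeechRecognitionModel(encoder, decoder)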
@@ -166,7 +149,7 @@ for epoch in range(num_epochs):
 
     # Forward pass
    output = model(audio_input, text_input)
-
+
     # Compute loss
     loss = torch.nn.CrossEntropyLoss()(output.logits.view(-1, output.logits.size(-1)), text_input.view(-1))
     loss.backward()
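The loss line is untouched apart from whitespace: the (batch, seq_len, vocab) logits are flattened to (batch * seq_len, vocab) and compared against the flattened target ids. A toy illustration with made-up sizes:

    import torch

    batch, seq_len, vocab = 2, 5, 11                       # illustrative sizes only
    logits = torch.randn(batch, seq_len, vocab)            # stand-in for output.logits
    targets = torch.randint(0, vocab, (batch, seq_len))    # stand-in for text_input

    loss = torch.nn.CrossEntropyLoss()(
        logits.view(-1, logits.size(-1)),  # shape (10, 11)
        targets.view(-1),                  # shape (10,)
    )
    print(loss.item())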
@@ -188,4 +171,6 @@ for epoch in range(num_epochs):
     # Update scheduler
     scheduler.step(val_loss)
 
-    print(f'Epoch {epoch}: Train Loss: {train_loss / len(train_loader)}, Val Loss: {val_loss / len(val_loader)}')
+    print(f'Epoch {epoch}: Train Loss: {train_loss / len(train_loader)}, Val Loss: {val_loss / len(val_loader)}')
+
+# Inference (use the same process as the training loop to handle inference)