aoxo committed · verified
Commit b3eb586 · Parent(s): 2067865

Update eduport_tts_mal.py

Files changed (1)
  1. eduport_tts_mal.py +47 -4
eduport_tts_mal.py CHANGED
@@ -4,11 +4,13 @@ from torch.utils.data import Dataset, DataLoader
 from transformers import GPT2Tokenizer, GPT2Config, GPT2LMHeadModel
 from transformers import Wav2Vec2Processor, Wav2Vec2Model
 import torchaudio
+from torch.optim.lr_scheduler import CosineAnnealingLR
 from sklearn.model_selection import train_test_split
 from torchaudio.transforms import Resample
 from torch.amp import GradScaler, autocast
 from tqdm import tqdm
 from jiwer import wer
+from torchaudio.transforms import PitchShift  # TimeStretch dropped: it expects complex spectrograms, not waveforms
 
 # Compute max audio length from the training dataset
 def compute_max_audio_length(audio_files, resampler, target_sampling_rate):
@@ -66,6 +68,22 @@ class SpeechDataset(Dataset):
         input_ids = self.tokenizer.encode(transcript, truncation=True, padding='longest', max_length=self.max_length, return_tensors="pt").squeeze(0)
 
         return input_values, input_ids
+
+class AugmentedSpeechDataset(SpeechDataset):
+    def __getitem__(self, idx):
+        input_values, input_ids = super().__getitem__(idx)
+
+        # Random time stretch, done as a speed perturbation via resampling
+        if torch.rand(1).item() < 0.3:
+            rate = torch.empty(1).uniform_(0.8, 1.2).item()
+            input_values = torchaudio.functional.resample(
+                input_values, orig_freq=self.target_sampling_rate,
+                new_freq=int(self.target_sampling_rate * rate))
+
+        # Random pitch shift of up to two semitones either way
+        if torch.rand(1).item() < 0.3:
+            n_steps = torch.randint(-2, 3, (1,)).item()
+            input_values = PitchShift(sample_rate=self.target_sampling_rate, n_steps=n_steps)(input_values)
+
+        return input_values, input_ids
 
 def collate_fn(batch):
     audio_inputs, text_inputs = zip(*batch)
@@ -103,7 +121,7 @@ print(max_audio_length)
 train_audios, val_audios, train_transcripts, val_transcripts = train_test_split(audio_files, transcript_files, test_size=0.05, random_state=42)
 
 # Define your dataset and dataloaders
-train_dataset = SpeechDataset(train_audios, train_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
+train_dataset = AugmentedSpeechDataset(train_audios, train_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
 val_dataset = SpeechDataset(val_audios, val_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
 
 # Update your DataLoader to use the custom collate_fn
@@ -147,6 +165,30 @@ class SpeechRecognitionModel(torch.nn.Module):
 
         return outputs
 
+def compute_wer(model, val_loader, device, tokenizer):
+    model.eval()
+    total_wer = 0
+    with torch.no_grad():
+        for audio_input, text_input in val_loader:
+            audio_input = audio_input.to(device)
+            text_input = text_input.to(device)
+
+            # Generate predictions conditioned on the encoder output
+            outputs = model.decoder.generate(
+                encoder_hidden_states=model.encoder(audio_input).last_hidden_state,
+                max_length=text_input.size(1)
+            )
+
+            # Convert predictions and ground truth to text
+            predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+            ground_truth = tokenizer.batch_decode(text_input, skip_special_tokens=True)
+
+            # Compute WER
+            batch_wer = wer(ground_truth, predictions)
+            total_wer += batch_wer
+
+    return total_wer / len(val_loader)
+
 # Training Loop with Improved Mixed Precision
 def train_model(num_epochs=10, accumulation_steps=16):
     # Prepare the models
@@ -169,7 +211,8 @@ def train_model(num_epochs=10, accumulation_steps=16):
 
     # Optimizer and learning rate scheduler
     optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
-    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
+    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
+    scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=1e-6)
 
     # Gradient scaler for mixed precision training
     scaler = GradScaler()
@@ -193,7 +236,7 @@ def train_model(num_epochs=10, accumulation_steps=16):
             output = model(audio_input, text_input)
 
             # Compute loss
-            loss = torch.nn.CrossEntropyLoss()(output.logits.view(-1, output.logits.size(-1)), text_input.view(-1))
+            loss = torch.nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id, label_smoothing=0.1)(output.logits.view(-1, output.logits.size(-1)), text_input.view(-1))
 
             loss = loss / accumulation_steps
             # Scaled loss for mixed precision
@@ -209,7 +252,7 @@ def train_model(num_epochs=10, accumulation_steps=16):
         # Validation step
         model.eval()
         val_loss = 0
-
+        val_wer = compute_wer(model, val_loader, device, tokenizer)  # named val_wer so it does not shadow jiwer's wer
        val_progress = tqdm(val_loader, desc=f"Validation {epoch+1}", position=1, leave=False)
 
         with torch.no_grad(), autocast('cuda', dtype=torch.float16):  # torch.amp.autocast requires a device_type
 
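Note on the augmentation in this commit: a minimal sanity check of the two waveform-domain transforms, assuming 16 kHz mono input (the sampling rate and shapes here are assumptions, not values taken from this file).

import torch
import torchaudio

sr = 16000                          # assumed sampling rate (Wav2Vec2 models expect 16 kHz)
wave = torch.randn(sr * 2)          # 2 seconds of dummy audio

# Speed perturbation: resample, then treat the result as if it were still at `sr`
rate = torch.empty(1).uniform_(0.8, 1.2).item()
stretched = torchaudio.functional.resample(wave, orig_freq=sr, new_freq=int(sr * rate))
print(wave.shape, stretched.shape)  # sample count scales by ~rate

# Pitch shift by +2 semitones; duration is preserved
shifted = torchaudio.transforms.PitchShift(sample_rate=sr, n_steps=2)(wave)
print(shifted.shape)

Since stretching changes the sample count, batching has to re-pad per batch rather than rely on a fixed max_audio_length.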
 
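The file's collate_fn is only partially visible in this diff; a typical implementation that would pair with these datasets pads each batch to its longest item. This is a hypothetical sketch, and the pad values are assumptions:

import torch
from torch.nn.utils.rnn import pad_sequence

def collate_fn(batch):
    audio_inputs, text_inputs = zip(*batch)
    # Zero-pad audio; pad text with the tokenizer's pad id (assumed 0 here)
    audio = pad_sequence(audio_inputs, batch_first=True, padding_value=0.0)
    text = pad_sequence(text_inputs, batch_first=True, padding_value=0)
    return audio, text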
 
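On compute_wer: jiwer's wer() accepts lists of reference and hypothesis strings and returns a single rate. One caveat with the loop in this commit is that averaging per-batch WER weights every batch equally, regardless of how many words each contains. A quick check of the metric itself:

from jiwer import wer

references = ["the cat sat on the mat", "hello world"]
hypotheses = ["the cat sat on a mat", "hello world"]
print(wer(references, hypotheses))  # 0.125: one substitution over eight reference words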
 
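On the scheduler swap: CosineAnnealingLR decays the learning rate from its initial value toward eta_min over T_max calls to step(), so with T_max=num_epochs it should be stepped once per epoch. A minimal sketch with a toy parameter:

import torch
from torch.optim.lr_scheduler import CosineAnnealingLR

param = torch.zeros(1, requires_grad=True)   # toy parameter
optimizer = torch.optim.Adam([param], lr=1e-4)
scheduler = CosineAnnealingLR(optimizer, T_max=10, eta_min=1e-6)

for epoch in range(10):
    # ... one epoch of training would go here ...
    optimizer.step()
    scheduler.step()
    print(epoch, scheduler.get_last_lr())    # follows a cosine curve down toward 1e-6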
 
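A note on the loss line: ignore_index and label_smoothing are constructor arguments of torch.nn.CrossEntropyLoss, not arguments to its forward call, which is how the changed line is written here. Minimal check (the pad id of 0 is an assumption):

import torch

criterion = torch.nn.CrossEntropyLoss(ignore_index=0, label_smoothing=0.1)
logits = torch.randn(8, 100)            # (batch * seq_len, vocab_size)
targets = torch.randint(0, 100, (8,))   # positions equal to 0 contribute no loss
print(criterion(logits, targets))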
 
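On the mixed-precision loop: GradScaler scales the loss before backward() and unscales gradients inside step(), and with gradient accumulation the optimizer only advances every accumulation_steps batches. Also note that torch.amp.autocast takes device_type as its first argument. A self-contained sketch of the pattern, using a toy model rather than this repo's:

import torch
from torch.amp import GradScaler, autocast

device = 'cuda' if torch.cuda.is_available() else 'cpu'
amp_dtype = torch.float16 if device == 'cuda' else torch.bfloat16
model = torch.nn.Linear(10, 2).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scaler = GradScaler(device)
accumulation_steps = 4

for step in range(16):
    x = torch.randn(8, 10, device=device)
    y = torch.randint(0, 2, (8,), device=device)
    with autocast(device, dtype=amp_dtype):
        loss = torch.nn.functional.cross_entropy(model(x), y) / accumulation_steps
    scaler.scale(loss).backward()
    if (step + 1) % accumulation_steps == 0:
        scaler.step(optimizer)   # unscales grads; skips the step on inf/NaN
        scaler.update()
        optimizer.zero_grad()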