Commit 2067865 (verified) · Parent(s): 588e0b8
aoxo committed: Upload eduport_tts_mal.py with huggingface_hub

Files changed (1): eduport_tts_mal.py (+25 −8)
eduport_tts_mal.py CHANGED
@@ -7,6 +7,8 @@ import torchaudio
 from sklearn.model_selection import train_test_split
 from torchaudio.transforms import Resample
 from torch.amp import GradScaler, autocast
+from tqdm import tqdm
+from jiwer import wer
 
 # Compute max audio length from the training dataset
 def compute_max_audio_length(audio_files, resampler, target_sampling_rate):
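
The new jiwer import points at a word-error-rate metric computed somewhere outside these hunks. For reference, a minimal sketch of how jiwer.wer is typically called; the reference and hypothesis strings here are invented:

from jiwer import wer

reference = "the cat sat on the mat"   # ground-truth transcript
hypothesis = "the cat sat on mat"      # model output with one word dropped

# WER = (substitutions + deletions + insertions) / reference word count
print(wer(reference, hypothesis))      # 1 deletion / 6 words, about 0.167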
@@ -120,6 +122,9 @@ class SpeechRecognitionModel(torch.nn.Module):
         super().__init__()
         self.encoder = encoder
         self.decoder = decoder
+
+        self.encoder.gradient_checkpointing_enable()
+        self.decoder.gradient_checkpointing_enable()
 
     def forward(self, audio_input, text_input):
         # Extract encoder hidden states
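
Enabling gradient_checkpointing_enable() on both submodules trades compute for memory: activations are discarded during the forward pass and recomputed during backward, which is what makes the larger effective batch introduced below affordable. A standalone sketch of the same call on the encoder this file uses (actual memory savings depend on model and sequence length):

import torch
from transformers import Wav2Vec2Model

encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
encoder.gradient_checkpointing_enable()  # drop activations, recompute them in backward
encoder.train()                          # checkpointing only applies in training mode

audio = torch.randn(1, 16000)            # one second of 16 kHz audio
hidden = encoder(audio).last_hidden_state
hidden.sum().backward()                  # backward re-runs the checkpointed segments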
@@ -136,13 +141,14 @@ class SpeechRecognitionModel(torch.nn.Module):
         outputs = self.decoder(
             input_ids=text_input,
             encoder_hidden_states=encoder_output,
-            encoder_attention_mask=encoder_attention_mask
+            encoder_attention_mask=encoder_attention_mask,
+            use_cache=False
         )
 
         return outputs
 
 # Training Loop with Improved Mixed Precision
-def train_model(num_epochs=10):
+def train_model(num_epochs=10, accumulation_steps=16):
     # Prepare the models
     # Use float32 for most of the model, let autocast handle precision
     encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
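
Passing use_cache=False alongside checkpointing is not cosmetic: a decoder's key/value cache stores tensors the checkpointer expects to recompute, and transformers warns and forces the cache off when both are requested; the cache is also dead weight during training. A self-contained sketch with a stand-in decoder (the file's actual decoder class is defined outside these hunks, so BertLMHeadModel and all the sizes here are assumptions):

import torch
from transformers import BertConfig, BertLMHeadModel

# Hypothetical small cross-attention decoder, standing in for the real one
config = BertConfig(is_decoder=True, add_cross_attention=True,
                    hidden_size=64, num_hidden_layers=2,
                    num_attention_heads=4, intermediate_size=128)
decoder = BertLMHeadModel(config)
decoder.gradient_checkpointing_enable()
decoder.train()

text_input = torch.randint(0, config.vocab_size, (2, 10))  # dummy token ids
encoder_output = torch.randn(2, 49, config.hidden_size)    # dummy encoder states
encoder_attention_mask = torch.ones(2, 49, dtype=torch.long)

outputs = decoder(
    input_ids=text_input,
    encoder_hidden_states=encoder_output,
    encoder_attention_mask=encoder_attention_mask,
    use_cache=False,  # KV cache is useless in training and fights checkpointing
)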
@@ -169,40 +175,51 @@ def train_model(num_epochs=10):
     scaler = GradScaler()
 
     # Training loop
-    for epoch in range(num_epochs):
+    for epoch in tqdm(range(num_epochs), desc="Epochs", position=0):
         model.train()
         train_loss = 0
-        for audio_input, text_input in train_loader:
-            optimizer.zero_grad()
+
+        train_progress = tqdm(train_loader, desc=f"Epoch {epoch+1}", position=1, leave=False)
+
+        for batch_idx, (audio_input, text_input) in enumerate(train_progress):
 
             # Move tensors to device
             audio_input = audio_input.squeeze(1).to(device)
             text_input = text_input.to(device)
 
             # Use autocast for mixed precision training
-            with autocast(dtype=torch.float16):
+            with autocast(dtype=torch.float16, device_type='cuda'):
                 # Forward pass
                 output = model(audio_input, text_input)
 
                 # Compute loss
                 loss = torch.nn.CrossEntropyLoss()(output.logits.view(-1, output.logits.size(-1)), text_input.view(-1))
 
+            loss = loss / accumulation_steps
             # Scaled loss for mixed precision
             scaler.scale(loss).backward()
-            scaler.step(optimizer)
-            scaler.update()
+            if (batch_idx + 1) % accumulation_steps == 0:
+                scaler.step(optimizer)
+                scaler.update()
+                optimizer.zero_grad()
 
+            train_progress.set_postfix({'loss': loss.item()})
             train_loss += loss.item()
 
         # Validation step
         model.eval()
         val_loss = 0
+
+        val_progress = tqdm(val_loader, desc=f"Validation {epoch+1}", position=1, leave=False)
+
         with torch.no_grad(), autocast(dtype=torch.float16):
             for audio_input, text_input in val_loader:
                 audio_input = audio_input.to(device)
                 text_input = text_input.to(device)
                 output = model(audio_input, text_input)
                 loss = torch.nn.CrossEntropyLoss()(output.logits.view(-1, output.logits.size(-1)), text_input.view(-1))
+
+                val_progress.set_postfix({'loss': loss.item()})
                 val_loss += loss.item()
 
         # Update scheduler
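
The reworked loop stacks three techniques: fp16 autocast for the forward pass, GradScaler so fp16 gradients do not underflow, and gradient accumulation so 16 micro-batches approximate one large batch; dividing the loss by accumulation_steps keeps the accumulated gradient on the same scale as a single big-batch step. A self-contained sketch of the pattern on a toy model (names and sizes are placeholders; requires a CUDA device):

import torch
from torch.amp import GradScaler, autocast

device = 'cuda'
model = torch.nn.Linear(128, 10).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scaler = GradScaler()
accumulation_steps = 16

optimizer.zero_grad()                          # start from clean gradients
for batch_idx in range(256):                   # stand-in for the DataLoader
    x = torch.randn(4, 128, device=device)
    y = torch.randint(0, 10, (4,), device=device)

    with autocast(device_type='cuda', dtype=torch.float16):
        loss = torch.nn.functional.cross_entropy(model(x), y)

    scaler.scale(loss / accumulation_steps).backward()  # gradients accumulate

    if (batch_idx + 1) % accumulation_steps == 0:
        scaler.step(optimizer)                 # unscales; skips the step on inf/nan
        scaler.update()
        optimizer.zero_grad()

One gotcha the committed loop shares with this sketch: if the batch count is not a multiple of accumulation_steps, the leftover micro-batches at the end never trigger scaler.step, so their gradients either go unused or bleed into the next epoch.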
 
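
Two details in the new validation block are worth a follow-up commit. First, val_progress wraps val_loader but the loop still iterates the raw val_loader, so the added progress bar never advances even though set_postfix is called on it. Second, the training autocast gained device_type='cuda', which torch.amp.autocast requires as its first argument, but the validation call did not, so it would raise a TypeError as written. A suggested fix in the same diff style (a proposal, not part of this commit):

-        with torch.no_grad(), autocast(dtype=torch.float16):
-            for audio_input, text_input in val_loader:
+        with torch.no_grad(), autocast(dtype=torch.float16, device_type='cuda'):
+            for audio_input, text_input in val_progress: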