aoxo committed
Commit e5c4a9b · verified · 1 Parent(s): 95f228e

Create eduport_tts_mal.py

Files changed (1)
  1. eduport_tts_mal.py +191 -0
eduport_tts_mal.py ADDED
@@ -0,0 +1,191 @@
+ import os
+ import torch
+ from torch.utils.data import Dataset, DataLoader
+ from transformers import GPT2Tokenizer, GPT2Config, GPT2LMHeadModel
+ from transformers import Wav2Vec2Processor, Wav2Vec2Model
+ import torchaudio
+ from sklearn.model_selection import train_test_split
+ from torchaudio.transforms import Resample
+
+ # Compute the maximum audio length (in samples) across the dataset
+ def compute_max_audio_length(audio_files, target_sampling_rate):
+     max_length = 0
+     for audio_path in audio_files:
+         waveform, sample_rate = torchaudio.load(audio_path)
+         if sample_rate != target_sampling_rate:
+             # Resample per file so the source sampling rate is respected
+             waveform = Resample(orig_freq=sample_rate, new_freq=target_sampling_rate)(waveform)
+         # Track the longest waveform along the time dimension
+         max_length = max(max_length, waveform.size(1))
+     return max_length
+
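+ # SpeechDataset pairs each .wav file with its .txt transcript: the audio is
+ # resampled to 16 kHz, converted to Wav2Vec2 input values and padded/truncated
+ # to a fixed length, while the transcript is encoded with the GPT-2 tokenizer.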
+ class SpeechDataset(Dataset):
+     def __init__(self, audio_files, transcript_files, tokenizer, processor, max_length=512, target_sampling_rate=16000, max_audio_length=None):
+         self.audio_files = audio_files
+         self.transcript_files = transcript_files
+         self.tokenizer = tokenizer
+         self.processor = processor
+         self.max_length = max_length
+         self.target_sampling_rate = target_sampling_rate
+         self.max_audio_length = max_audio_length  # Fixed audio length (in samples) to pad/truncate to
+
+     def __len__(self):
+         return len(self.audio_files)
+
+     def __getitem__(self, idx):
+         audio_path = self.audio_files[idx]
+         transcript_path = self.transcript_files[idx]
+
+         # Load the audio
+         waveform, sample_rate = torchaudio.load(audio_path)
+
+         # If the audio sample rate is not 16 kHz, resample it
+         if sample_rate != self.target_sampling_rate:
+             waveform = Resample(orig_freq=sample_rate, new_freq=self.target_sampling_rate)(waveform)
+
+         # Pass the mono waveform to the Wav2Vec2 processor; keep shape (1, time)
+         input_values = self.processor(waveform.squeeze(0), sampling_rate=self.target_sampling_rate, return_tensors="pt").input_values
+
+         # Pad or truncate along the time dimension to the fixed max_audio_length
+         if input_values.size(1) < self.max_audio_length:
+             padding_length = self.max_audio_length - input_values.size(1)
+             input_values = torch.cat([input_values, torch.zeros(1, padding_length)], dim=1)
+         else:
+             input_values = input_values[:, :self.max_audio_length]
+
+         # Load the transcript
+         with open(transcript_path, 'r', encoding='utf-8') as file:
+             transcript = file.read().strip()
+
+         # Encode the transcript using the GPT-2 tokenizer
+         input_ids = self.tokenizer.encode(transcript, truncation=True, max_length=self.max_length, return_tensors="pt").squeeze(0)
+
+         return input_values, input_ids
+
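+ # collate_fn pads each batch on the fly: audio to the longest clip in the batch
+ # (shape: batch x 1 x time) and token ids to the longest transcript (batch x length).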
+ def collate_fn(batch):
+     audio_inputs, text_inputs = zip(*batch)
+
+     # Pad audio inputs to the maximum length in the batch
+     max_audio_len = max(audio.size(1) for audio in audio_inputs)
+     audio_inputs_padded = torch.stack([
+         torch.cat([audio, torch.zeros(1, max_audio_len - audio.size(1))], dim=1)
+         if audio.size(1) < max_audio_len else audio[:, :max_audio_len]
+         for audio in audio_inputs
+     ])
+
+     # Pad text inputs (with token id 0) to the longest transcript length
+     max_text_len = max(text.size(0) for text in text_inputs)
+     text_inputs_padded = torch.stack([
+         torch.cat([text, torch.tensor([0] * (max_text_len - text.size(0)))], dim=0)
+         if text.size(0) < max_text_len else text[:max_text_len]
+         for text in text_inputs
+     ])
+
+     return audio_inputs_padded, text_inputs_padded
+
+ # Tokenizer and processor
+ tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+ processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h')
+
+ # GPT-2 has no dedicated padding token, so reuse the end-of-text token
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # Data preparation
+ wav_folder = './wav'
+ transcript_folder = './transcription'
+
+ # Pair each audio file with its transcript (same base name, .txt extension)
+ wav_names = sorted(f for f in os.listdir(wav_folder) if f.endswith('.wav'))
+ audio_files = [os.path.join(wav_folder, f) for f in wav_names]
+ transcript_files = [os.path.join(transcript_folder, f.replace('.wav', '.txt')) for f in wav_names]
+
+ # Compute the max audio length (resampling to 16 kHz where needed)
+ max_audio_length = compute_max_audio_length(audio_files, target_sampling_rate=16000)
+ print(max_audio_length)
+
+ # Split the dataset into training and validation sets
+ train_audios, val_audios, train_transcripts, val_transcripts = train_test_split(audio_files, transcript_files, test_size=0.05, random_state=42)
+
+ # Datasets and dataloaders
+ train_dataset = SpeechDataset(train_audios, train_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
+ val_dataset = SpeechDataset(val_audios, val_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
+
+ # Use the custom collate_fn so variable-length items can be batched
+ train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, collate_fn=collate_fn)
+ val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
+
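+ # Architecture: a pretrained Wav2Vec2 encoder produces frame-level hidden states,
+ # and a randomly initialised GPT-2 decoder with cross-attention attends over them
+ # to generate the transcript token by token.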
+ # Model architecture
+ encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
+ # Decoder configuration
+ decoder_config = GPT2Config(
+     vocab_size=len(tokenizer),
+     add_cross_attention=True  # Enable cross-attention over the encoder output
+ )
+ decoder = GPT2LMHeadModel(config=decoder_config)
+
+ class SpeechRecognitionModel(torch.nn.Module):
+     def __init__(self, encoder, decoder):
+         super().__init__()
+         self.encoder = encoder
+         self.decoder = decoder
+
+     def forward(self, audio_input, text_input):
+         # Extract encoder hidden states
+         encoder_output = self.encoder(audio_input).last_hidden_state
+
+         # Create an attention mask for the encoder output
+         encoder_attention_mask = torch.ones(
+             encoder_output.shape[:2],
+             dtype=torch.long,
+             device=encoder_output.device
+         )
+
+         # Forward pass through the decoder with cross-attention
+         outputs = self.decoder(
+             input_ids=text_input,
+             encoder_hidden_states=encoder_output,
+             encoder_attention_mask=encoder_attention_mask
+         )
+
+         return outputs
+
+ # Instantiate the model
+ model = SpeechRecognitionModel(encoder, decoder)
+
+ # Optimizer and scheduler
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
+ scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
+
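+ # Training: teacher forcing with a causal LM objective; the decoder sees the
+ # transcript tokens and is trained to predict each next token while attending
+ # to the Wav2Vec2 features via cross-attention.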
+ # Training loop
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model.to(device)
+
+ num_epochs = 10
+ criterion = torch.nn.CrossEntropyLoss()
+
+ for epoch in range(num_epochs):
+     model.train()
+     train_loss = 0
+     for audio_input, text_input in train_loader:
+         optimizer.zero_grad()
+
+         # Move tensors to the appropriate device; audio arrives as (batch, 1, time)
+         audio_input = audio_input.squeeze(1).to(device)
+         text_input = text_input.to(device)
+
+         # Forward pass
+         output = model(audio_input, text_input)
+
+         # Causal LM loss: tokens up to position i predict the token at position i + 1
+         shift_logits = output.logits[:, :-1, :].contiguous()
+         shift_labels = text_input[:, 1:].contiguous()
+         loss = criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+         loss.backward()
+         optimizer.step()
+
+         train_loss += loss.item()
+
+     # Validation step
+     model.eval()
+     val_loss = 0
+     with torch.no_grad():
+         for audio_input, text_input in val_loader:
+             audio_input = audio_input.squeeze(1).to(device)
+             text_input = text_input.to(device)
+             output = model(audio_input, text_input)
+             shift_logits = output.logits[:, :-1, :].contiguous()
+             shift_labels = text_input[:, 1:].contiguous()
+             loss = criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+             val_loss += loss.item()
+
+     # Update the learning-rate scheduler on the validation loss
+     scheduler.step(val_loss)
+
+     print(f'Epoch {epoch}: Train Loss: {train_loss / len(train_loader)}, Val Loss: {val_loss / len(val_loader)}')
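
After training, transcription requires autoregressive decoding rather than teacher forcing. A minimal greedy-decoding sketch, assuming a trained `model`, an `audio_input` batch of shape (1, time) already on `device`, and an illustrative cap of 64 generated tokens (GPT-2 has no dedicated start token, so the end-of-text id is reused as the prompt):

# Sketch: greedy decoding with the encoder-decoder above (assumes a trained `model`)
model.eval()
with torch.no_grad():
    enc = model.encoder(audio_input).last_hidden_state
    enc_mask = torch.ones(enc.shape[:2], dtype=torch.long, device=enc.device)
    generated = torch.tensor([[tokenizer.eos_token_id]], device=enc.device)
    for _ in range(64):  # illustrative generation cap, not from the original script
        logits = model.decoder(
            input_ids=generated,
            encoder_hidden_states=enc,
            encoder_attention_mask=enc_mask
        ).logits
        next_id = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        generated = torch.cat([generated, next_id], dim=1)
        if next_id.item() == tokenizer.eos_token_id:
            break
    print(tokenizer.decode(generated[0, 1:], skip_special_tokens=True))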