import os
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import GPT2Tokenizer, GPT2Config, GPT2LMHeadModel
from transformers import Wav2Vec2Processor, Wav2Vec2Model
import torchaudio
from torch.optim.lr_scheduler import CosineAnnealingLR
from sklearn.model_selection import train_test_split
from torchaudio.transforms import Resample
from torch.amp import GradScaler, autocast
from tqdm import tqdm
from jiwer import wer
from safetensors.torch import save_model

# Compute the longest audio length (in samples at the target rate) across the dataset
def compute_max_audio_length(audio_files, target_sampling_rate):
    max_length = 0
    for audio_path in audio_files:
        waveform, sample_rate = torchaudio.load(audio_path)
        if sample_rate != target_sampling_rate:
            # Build the resampler from the file's actual sample rate; Resample's orig_freq
            # defaults to 16000, so a shared instance would silently be a no-op
            waveform = Resample(orig_freq=sample_rate, new_freq=target_sampling_rate)(waveform)
        max_length = max(max_length, waveform.size(1))  # time dimension
    return max_length

class SpeechDataset(Dataset):
    def __init__(self, audio_files, transcript_files, tokenizer, processor, max_length=512, target_sampling_rate=16000, max_audio_length=None):
        self.audio_files = audio_files
        self.transcript_files = transcript_files
        self.tokenizer = tokenizer
        self.processor = processor
        self.max_length = max_length
        self.target_sampling_rate = target_sampling_rate
        self.max_audio_length = max_audio_length  # Max length of audio
        # Resampling is handled per file in __getitem__, using each file's actual sample rate

    def __len__(self):
        return len(self.audio_files)

    def __getitem__(self, idx):
        audio_path = self.audio_files[idx]
        transcript_path = self.transcript_files[idx]
        
        # Load and process the audio
        waveform, sample_rate = torchaudio.load(audio_path)
        
        # Resample to the target rate if necessary, using the file's actual rate as orig_freq
        if sample_rate != self.target_sampling_rate:
            waveform = Resample(orig_freq=sample_rate, new_freq=self.target_sampling_rate)(waveform)
        
        # Pass the waveform to the Wav2Vec2 processor
        input_values = self.processor(waveform, sampling_rate=self.target_sampling_rate, return_tensors="pt").input_values.squeeze(0)
        
        # Pad or truncate the audio to a fixed length (the longest audio in the dataset)
        if input_values.size(1) < self.max_audio_length:
            padding_length = self.max_audio_length - input_values.size(1)
            # Zero tensor with the same leading dim and exactly padding_length samples;
            # slicing input_values here would under-pad whenever padding_length exceeds the clip length
            padding = torch.zeros(input_values.size(0), padding_length, dtype=input_values.dtype)
            input_values = torch.cat([input_values, padding], dim=1)
        else:
            input_values = input_values[:, :self.max_audio_length]  # Truncate to max_audio_length


        # Load and process the transcript
        with open(transcript_path, 'r') as file:
            transcript = file.read().strip()
        
        # Encode the transcript using the GPT2 tokenizer
        input_ids = self.tokenizer.encode(transcript, truncation=True, padding='longest', max_length=self.max_length, return_tensors="pt").squeeze(0)

        return input_values, input_ids
        
def collate_fn(batch):
    audio_inputs, text_inputs = zip(*batch)
    
    # Ensure each audio input is 2D [channels, time] (drop a stray leading batch dim)
    audio_inputs = [audio.squeeze(0) if audio.dim() == 3 else audio for audio in audio_inputs]
    
    # Pad audio inputs to the maximum length in the batch
    max_audio_len = max([audio.size(1) for audio in audio_inputs])
    
    audio_inputs_padded = []
    for audio in audio_inputs:
        if audio.size(1) < max_audio_len:
            padding = torch.zeros(audio.size(0), max_audio_len - audio.size(1), device=audio.device)
            padded_audio = torch.cat([audio, padding], dim=1)
            audio_inputs_padded.append(padded_audio)
        else:
            audio_inputs_padded.append(audio[:, :max_audio_len])
    
    # Stack audio inputs
    audio_inputs_padded = torch.stack(audio_inputs_padded)
    
    # Pad text inputs (shorter sequences are filled with token id 0; the loss below does not mask these positions)
    max_text_len = max([text.size(0) for text in text_inputs])
    text_inputs_padded = torch.stack([
        torch.cat([text, torch.zeros(max_text_len - text.size(0), dtype=text.dtype)], dim=0) 
        if text.size(0) < max_text_len 
        else text[:max_text_len] 
        for text in text_inputs
    ])
    
    return audio_inputs_padded, text_inputs_padded

# Tokenizer and processor
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h')

tokenizer.pad_token = tokenizer.eos_token

# Data preparation
wav_folder = './wav'
transcript_folder = './transcription'

# Pair audio files with transcripts from a single sorted listing so the two lists stay aligned
wav_names = sorted(f for f in os.listdir(wav_folder) if f.endswith('.wav'))
audio_files = [os.path.join(wav_folder, f) for f in wav_names]
transcript_files = [os.path.join(transcript_folder, f.replace('.wav', '.txt')) for f in wav_names]

# Compute the max audio length (in samples at 16 kHz), used for fixed-length padding
max_audio_length = compute_max_audio_length(audio_files, target_sampling_rate=16000)
print(f"Max audio length: {max_audio_length} samples")

# Split the dataset into training and validation sets
train_audios, val_audios, train_transcripts, val_transcripts = train_test_split(audio_files, transcript_files, test_size=0.05, random_state=42)

# Define your dataset and dataloaders
train_dataset = SpeechDataset(train_audios, train_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
val_dataset = SpeechDataset(val_audios, val_transcripts, tokenizer, processor, max_audio_length=max_audio_length)

# Update your DataLoader to use the custom collate_fn
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, collate_fn=collate_fn)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
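
# Optional sanity check (not part of the training flow; assumes './wav' and './transcription'
# contain at least one matching pair, as the rest of this script already does): pull one batch
# and confirm the shapes, roughly [batch, 1, samples] for audio and [batch, tokens] for text.
_sample_audio, _sample_text = next(iter(train_loader))
print(f"Sample batch -> audio: {tuple(_sample_audio.shape)}, text: {tuple(_sample_text.shape)}")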

# Model architecture: Wav2Vec2 encoder + GPT-2 decoder with cross-attention
encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
decoder_config = GPT2Config(vocab_size=len(tokenizer), add_cross_attention=True)
decoder = GPT2LMHeadModel(config=decoder_config)

# Model Architecture with Improved FP16 Support
class SpeechRecognitionModel(torch.nn.Module):
    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        
        self.encoder.gradient_checkpointing_enable()
        self.decoder.gradient_checkpointing_enable()

    def forward(self, audio_input, text_input):
        # Wav2Vec2 expects audio of shape [batch, time]; squeeze away any singleton
        # channel/extra dimensions produced by the collate function
        while audio_input.dim() > 2 and audio_input.size(1) == 1:
            audio_input = audio_input.squeeze(1)
        
        # Extract encoder hidden states
        encoder_output = self.encoder(audio_input).last_hidden_state
        
        # Create an attention mask for the encoder output
        encoder_attention_mask = torch.ones(
            encoder_output.shape[:2], 
            dtype=torch.long, 
            device=encoder_output.device
        )

        # Forward pass through the decoder with cross-attention
        outputs = self.decoder(
            input_ids=text_input,
            encoder_hidden_states=encoder_output,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=False
        )
        
        return outputs
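
# Optional forward-pass smoke test on random data (hypothetical shapes, purely illustrative):
# one second of fake 16 kHz mono audio and a short random token sequence; the logits should
# come back as [1, 8, vocab_size].
_smoke_model = SpeechRecognitionModel(encoder, decoder).eval()
with torch.no_grad():
    _smoke_out = _smoke_model(torch.randn(1, 1, 16000), torch.randint(0, len(tokenizer), (1, 8)))
print(f"Smoke-test logits shape: {tuple(_smoke_out.logits.shape)}")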

def compute_wer(model, val_loader, device, tokenizer):
    model.eval()
    total_wer = 0
    with torch.no_grad():
        for audio_input, text_input in val_loader:
            audio_input = audio_input.to(device)
            if audio_input.dim() == 3:
                audio_input = audio_input.squeeze(1)  # [batch, time] for Wav2Vec2
            text_input = text_input.to(device)

            # Generate predictions; with no prompt given, generation starts from the BOS token
            outputs = model.decoder.generate(
                encoder_hidden_states=model.encoder(audio_input).last_hidden_state,
                max_length=text_input.size(1),
                pad_token_id=tokenizer.eos_token_id
            )
            
            # Convert predictions and ground truth to text
            predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)
            ground_truth = tokenizer.batch_decode(text_input, skip_special_tokens=True)
            
            # Compute WER
            batch_wer = wer(ground_truth, predictions)
            total_wer += batch_wer
    
    return total_wer / len(val_loader)

# Training Loop with Improved Mixed Precision
def train_model(num_epochs=10, accumulation_steps=16):
    # Prepare the models
    # Use float32 for most of the model, let autocast handle precision
    encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
    
    # Modify the decoder configuration
    decoder_config = GPT2Config(
        vocab_size=len(tokenizer), 
        add_cross_attention=True
    )
    decoder = GPT2LMHeadModel(config=decoder_config)

    # Initialize the model
    model = SpeechRecognitionModel(encoder, decoder)

    # Move to GPU 
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    # Optimizer and learning rate scheduler
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
    scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=1e-6)

    # Gradient scaler for mixed precision training
    scaler = GradScaler()

    # Track the best validation loss for checkpointing
    best_val_loss = float('inf')

    # Training loop
    for epoch in tqdm(range(num_epochs), desc="Epochs", position=0):
        model.train()
        train_loss = 0
        
        train_progress = tqdm(train_loader, desc=f"Epoch {epoch+1}", position=1, leave=False)
        
        for batch_idx, (audio_input, text_input) in enumerate(train_progress):

            # Move tensors to device (forward() squeezes away singleton channel dims)
            audio_input = audio_input.to(device)
            text_input = text_input.to(device)

            # Use autocast for mixed precision training
            with autocast(device_type='cuda', dtype=torch.float16):
                # Forward pass
                output = model(audio_input, text_input)

                # Next-token loss: logits at position i predict token i+1, so shift by one
                logits = output.logits[:, :-1, :].contiguous()
                targets = text_input[:, 1:].contiguous()
                loss = torch.nn.CrossEntropyLoss()(logits.view(-1, logits.size(-1)), targets.view(-1))

            loss = loss / accumulation_steps
            # Scaled loss for mixed precision
            scaler.scale(loss).backward()
            if (batch_idx + 1) % accumulation_steps == 0:
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

            train_progress.set_postfix({'loss': loss.item()})
            train_loss += loss.item()

        # Step for any leftover accumulated gradients at the end of the epoch
        if (batch_idx + 1) % accumulation_steps != 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()

        # Validation step
        model.eval()
        val_loss = 0
        val_wer = compute_wer(model, val_loader, device, tokenizer)  # avoid shadowing jiwer's wer()
        val_progress = tqdm(val_loader, desc=f"Validation {epoch+1}", position=1, leave=False)

        with torch.no_grad(), autocast(device_type='cuda', dtype=torch.float16):
            for audio_input, text_input in val_progress:
                audio_input = audio_input.to(device)
                text_input = text_input.to(device)
                output = model(audio_input, text_input)
                # Same shifted next-token loss as in training
                logits = output.logits[:, :-1, :].contiguous()
                targets = text_input[:, 1:].contiguous()
                loss = torch.nn.CrossEntropyLoss()(logits.view(-1, logits.size(-1)), targets.view(-1))

                val_progress.set_postfix({'loss': loss.item()})
                val_loss += loss.item()

        # Update scheduler (CosineAnnealingLR steps per epoch and takes no metric)
        scheduler.step()

        print(f'Epoch {epoch+1}: Train Loss: {train_loss / len(train_loader):.4f}, Val Loss: {val_loss / len(val_loader):.4f}, WER: {val_wer:.4f}')

        if val_loss < best_val_loss:
            best_val_loss = val_loss

            # Save model weights to .safetensors; save_model handles GPT-2's tied
            # embedding/lm_head weights, which save_file rejects as shared tensors
            save_model(model, 'best_model.safetensors')

            # Save optimizer state_dict and metadata separately (keep the model on its
            # device; moving it to CPU here would break the remaining training epochs)
            torch.save({
                'optimizer_state_dict': optimizer.state_dict(),
                'epoch': epoch,
                'loss': val_loss,
                'model_class': model.__class__.__name__,  # Save the class name
            }, 'metadata.pth')

# Run the training
train_model()
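
# Minimal reload sketch (an addition for illustration, not part of the original pipeline):
# once training has written 'best_model.safetensors', the weights can be restored into a
# freshly built model. load_model is the safetensors counterpart of save_model used above.
def load_trained_model(weights_path='best_model.safetensors'):
    from safetensors.torch import load_model
    enc = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
    dec = GPT2LMHeadModel(config=GPT2Config(vocab_size=len(tokenizer), add_cross_attention=True))
    restored = SpeechRecognitionModel(enc, dec)
    load_model(restored, weights_path)
    return restored.eval()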