Athspi committed on
Commit
177d844
·
verified ·
1 Parent(s): c993164

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -3,6 +3,7 @@ import torch
3
  from faster_whisper import WhisperModel
4
  import tempfile
5
  import logging
 
6
 
7
  # Set up logging
8
  logging.basicConfig(level=logging.INFO)
@@ -13,18 +14,18 @@ MODELS = {
13
  "Faster Whisper Medium": "Systran/faster-whisper-medium", # Use the medium model
14
  }
15
 
16
- def transcribe_live_audio(audio, model_size="Faster Whisper Medium"):
17
  """Transcribe live audio from the microphone."""
18
  try:
19
  # Save the live audio to a temporary file
20
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
21
  temp_audio_path = temp_audio.name
22
- audio.export(temp_audio_path, format="wav")
23
 
24
  # Load the appropriate model
25
  device = "cuda" if torch.cuda.is_available() else "cpu"
26
  compute_type = "float32" if device == "cuda" else "int8"
27
- model = WhisperModel(MODELS[model_size], device=device, compute_type=compute_type)
28
 
29
  # Transcribe the live audio
30
  segments, info = model.transcribe(
 
3
  from faster_whisper import WhisperModel
4
  import tempfile
5
  import logging
6
+ import os
7
 
8
  # Set up logging
9
  logging.basicConfig(level=logging.INFO)
 
14
  "Faster Whisper Medium": "Systran/faster-whisper-medium", # Use the medium model
15
  }
16
 
17
+ def transcribe_live_audio(audio):
18
  """Transcribe live audio from the microphone."""
19
  try:
20
  # Save the live audio to a temporary file
21
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
22
  temp_audio_path = temp_audio.name
23
+ audio.save(temp_audio_path) # Save the audio file
24
 
25
  # Load the appropriate model
26
  device = "cuda" if torch.cuda.is_available() else "cpu"
27
  compute_type = "float32" if device == "cuda" else "int8"
28
+ model = WhisperModel(MODELS["Faster Whisper Medium"], device=device, compute_type=compute_type)
29
 
30
  # Transcribe the live audio
31
  segments, info = model.transcribe(