Update app.py
Browse files
app.py
CHANGED
@@ -10,17 +10,9 @@ device = 0 if torch.cuda.is_available() else -1
|
|
10 |
asr_pipeline = pipeline(model="openai/whisper-small", device=device)
|
11 |
|
12 |
# Function to handle the transcription process
|
13 |
-
def transcribe_audio(
|
14 |
-
#
|
15 |
-
|
16 |
-
temp_audio_file.write(audio_file.read())
|
17 |
-
temp_file_path = temp_audio_file.name
|
18 |
-
|
19 |
-
# Perform the transcription
|
20 |
-
transcription = asr_pipeline(temp_file_path)
|
21 |
-
|
22 |
-
# Remove the temporary file
|
23 |
-
os.remove(temp_file_path)
|
24 |
|
25 |
# Return the transcription result
|
26 |
return transcription['text']
|
@@ -28,7 +20,7 @@ def transcribe_audio(audio_file):
|
|
28 |
# Create Gradio interface
|
29 |
interface = gr.Interface(
|
30 |
fn=transcribe_audio, # The function to call when audio is uploaded
|
31 |
-
inputs=gr.Audio(type="
|
32 |
outputs="text", # Output type: text (transcription)
|
33 |
title="Whisper Audio Transcription", # Title of the Gradio interface
|
34 |
description="Upload an audio file to get a transcription using OpenAI's Whisper model"
|
|
|
# Build the Whisper-small speech-recognition pipeline once at module load.
# `device` is set above: 0 (first GPU) when CUDA is available, else -1 (CPU).
asr_pipeline = pipeline(model="openai/whisper-small", device=device)
# Function to handle the transcription process
def transcribe_audio(audio_file_path):
    """Transcribe the audio file at *audio_file_path* with the module-level
    Whisper pipeline and return the recognized text.

    Gradio passes a filesystem path here because the interface is configured
    with ``gr.Audio(type="filepath")``, so no temporary-file handling is needed.
    """
    # Run ASR directly on the path; the pipeline accepts a file path input.
    result = asr_pipeline(audio_file_path)
    # The pipeline returns a dict whose transcription lives under 'text'.
    return result['text']
|
|
20 |
# Create Gradio interface
|
21 |
interface = gr.Interface(
|
22 |
fn=transcribe_audio, # The function to call when audio is uploaded
|
23 |
+
inputs=gr.Audio(type="filepath"), # Use 'filepath' to get the path to the audio file
|
24 |
outputs="text", # Output type: text (transcription)
|
25 |
title="Whisper Audio Transcription", # Title of the Gradio interface
|
26 |
description="Upload an audio file to get a transcription using OpenAI's Whisper model"
|