MYousafRana's picture
Update app.py
4cdaa05 verified
raw
history blame
849 Bytes
import gradio as gr
import torch
import whisper
# Load the Whisper large-v3 checkpoint once at module import time so every
# request reuses the same model instance.
# NOTE(review): first run downloads the checkpoint (~3 GB) and inference is
# slow without a GPU — confirm the deployment hardware can handle it.
model = whisper.load_model("large-v3")
def transcribe(audio):
    """Transcribe the given audio input with the module-level Whisper model.

    Args:
        audio: Either a filesystem path (str) or a file-like object exposing
            a ``.name`` attribute (as some Gradio upload components provide).
            May be ``None`` when the user triggers transcription without
            uploading a file.

    Returns:
        The transcribed text, or a prompt message when no audio was provided.
    """
    # Gradio passes None when no file has been uploaded yet; the original
    # code crashed on `audio.name` (AttributeError) in that case.
    if audio is None:
        return "Please upload an audio file."
    audio_path = audio if isinstance(audio, str) else audio.name
    result = model.transcribe(audio_path)
    return result["text"]
# Build the UI: descriptive markdown in a sidebar, the interactive
# upload / transcribe / output widgets in the main area.
# NOTE(review): the original file's indentation was lost, so the exact
# sidebar scoping is reconstructed — verify the intended layout.
with gr.Blocks(fill_height=True) as demo:
    with gr.Sidebar():
        gr.Markdown("# Audio Transcription")
        gr.Markdown("This demo uses the OpenAI Whisper-large-v3 model for audio transcription.")
        gr.Markdown("### Upload an audio file to transcribe")
    # Fix: `source=` was removed in Gradio 4.x in favor of a `sources` list,
    # and `gr.Sidebar` (above) only exists in Gradio 5+, so the old keyword
    # would raise a TypeError on any version that can run this file.
    audio_input = gr.Audio(sources=["upload"], type="filepath")
    output_text = gr.Textbox(label="Transcription")
    transcribe_button = gr.Button("Transcribe")
    # Wire the button to the transcription function defined above.
    transcribe_button.click(fn=transcribe, inputs=audio_input, outputs=output_text)

demo.launch()