import gradio as gr
import whisper

# Load the Whisper large-v3 model (requires a recent openai-whisper release)
model = whisper.load_model("large-v3")

def transcribe(audio):
    """Transcribe the audio file at the given path."""
    if audio is None:
        return "Please upload an audio file."
    result = model.transcribe(audio)
    return result["text"]

with gr.Blocks() as demo:
    gr.Markdown("# Audio Transcription")
    gr.Markdown("This demo uses the OpenAI Whisper-large-v3 model for audio transcription.")
    # type="filepath" passes the path of the uploaded file to the transcribe function
    audio_input = gr.Audio(type="filepath")
    transcribe_button = gr.Button("Transcribe")
    output_text = gr.Textbox(label="Transcription")
    transcribe_button.click(fn=transcribe, inputs=audio_input, outputs=output_text)

# Serves the app locally (http://127.0.0.1:7860 by default)
demo.launch()