Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,10 +1,26 @@
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
with gr.Blocks(fill_height=True) as demo:
|
4 |
with gr.Sidebar():
|
5 |
-
gr.Markdown("#
|
6 |
-
gr.Markdown("This
|
7 |
-
button = gr.LoginButton("Sign in")
|
8 |
-
gr.load("models/openai/whisper-large-v3", accept_token=button, provider="fal-ai")
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
import torch
|
3 |
+
import whisper
|
4 |
+
|
5 |
+
# Load the Whisper speech-recognition model once at import time.
# NOTE(review): "large-v3" downloads multi-GB weights on first run and
# needs substantial RAM/VRAM — confirm the Space hardware can host it.
|
6 |
+
model = whisper.load_model("large-v3")
|
7 |
+
|
8 |
+
def transcribe(audio):
    """Run Whisper speech-to-text on *audio* and return the transcript string.

    *audio* is either a filesystem path (Gradio ``type="filepath"``) or a
    file-like object exposing ``.name``; both are resolved to a path before
    being handed to the model.
    """
    if isinstance(audio, str):
        source_path = audio
    else:
        # Uploaded file object: Gradio exposes the temp file via .name.
        source_path = audio.name
    return model.transcribe(source_path)["text"]
|
13 |
|
14 |
# Build the Gradio UI: sidebar holds the description, uploader, transcript
# box, and the button that triggers transcription.
with gr.Blocks(fill_height=True) as demo:
    with gr.Sidebar():
        gr.Markdown("# Audio Transcription")
        gr.Markdown("This demo uses the OpenAI Whisper-large-v3 model for audio transcription.")

        gr.Markdown("### Upload an audio file to transcribe")
        # FIX: Gradio 4+/5 (the versions providing gr.Sidebar/fill_height)
        # renamed the 3.x `source="upload"` keyword to `sources` taking a
        # list; the old keyword raises TypeError at app startup.
        audio_input = gr.Audio(sources=["upload"], type="filepath")
        output_text = gr.Textbox(label="Transcription")
        transcribe_button = gr.Button("Transcribe")

        # Wire the button to the transcribe() function defined above.
        transcribe_button.click(fn=transcribe, inputs=audio_input, outputs=output_text)

demo.launch()
|