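"""Voice login demo for Biryani Hub: captures the user's name and email from
microphone recordings and transcribes them with Whisper via a Gradio interface."""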
import gradio as gr
import speech_recognition as sr
import torch
from transformers import pipeline
# Load ASR model (Whisper)
device = "cuda" if torch.cuda.is_available() else "cpu"
speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=0 if device == "cuda" else -1)
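# The ASR pipeline accepts a path to an audio file and returns a dict whose "text" key holds the transcription.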
# Initialize Speech Recognition
recognizer = sr.Recognizer()
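# Note: the recognizer above is initialized but not referenced; transcription below uses the Whisper pipeline.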
# Function to Capture Name
def capture_name(audio):
    try:
        # Transcribe the recorded name and prompt the user for the next step
        text = speech_to_text(audio)["text"]
        return f"Name Captured: {text}", "Please provide your email address."
    except Exception as e:
        return f"Error: {str(e)}", ""
# Function to Capture Email
def capture_email(audio):
    try:
        # Transcribe the recorded email address
        text = speech_to_text(audio)["text"]
        return f"Email Captured: {text}"
    except Exception as e:
        return f"Error: {str(e)}"
# Gradio Interface
def gradio_interface():
    with gr.Blocks() as demo:
        gr.Markdown("### 🎙️ Welcome to Biryani Hub")
        with gr.Column():
            gr.Markdown("#### Step 1: Tell me your name")
            audio_input_name = gr.Audio(type="filepath", label="Record your Name")
            name_output = gr.Textbox(label="Your Name:")
            email_prompt_output = gr.Textbox(label="Next Step:", interactive=False)
            audio_input_name.change(capture_name, inputs=audio_input_name, outputs=[name_output, email_prompt_output])

            gr.Markdown("#### Step 2: Provide your email")
            audio_input_email = gr.Audio(type="filepath", label="Record your Email")
            email_output = gr.Textbox(label="Your Email:")
            audio_input_email.change(capture_email, inputs=audio_input_email, outputs=email_output)
    return demo
# Launch the Gradio Interface
demo = gradio_interface()
demo.launch(debug=True)