import gradio as gr
from transformers import pipeline

# Fine-tuned Whisper small checkpoint served through the ASR pipeline.
model_id = "KingNish/whisper-small-en"

pipe = pipeline(
    "automatic-speech-recognition",
    model=model_id,
)

def transcribe(audio):
    # Gradio passes the recorded/uploaded audio as a filepath string,
    # which the ASR pipeline accepts directly.
    text = pipe(audio)["text"]
    return text

iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Whisper Small English",
    description="Realtime demo for English speech recognition using a fine-tuned Whisper small model.",
)

iface.launch()
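
# Usage sketch (assumption: a local audio file named "sample.wav" exists in the
# working directory): the transcribe() helper can also be exercised directly,
# without the Gradio UI, since the pipeline accepts a plain filepath string.
#
#     print(transcribe("sample.wav"))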