import os

import gradio as gr
import torch
from transformers import pipeline

# Use a GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Read the Hugging Face access token from the environment (set as a Space secret)
auth_token = os.environ.get("hf_token")
if not auth_token:
    raise ValueError("Hugging Face token is missing! Add it as a secret.")

# Load the fine-tuned Whisper model as a speech-recognition pipeline
pipe = pipeline(
    model="fahadqazi/whisper-small-sindhi",  # change to "your-username/the-name-you-picked"
    device=device,
    token=auth_token,
)


def transcribe(audio):
    # The pipeline accepts a path to an audio file and returns a dict with the transcription
    text = pipe(audio)["text"]
    return text


iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Whisper Small Sindhi",
    description="Real-time demo for Sindhi speech recognition using a fine-tuned Whisper small model.",
)

iface.launch()