import gradio as gr
import numpy as np
from transformers import pipeline

checkpoint = "dreahim/whisper-small-Egyptian_ASR_3train_2dev_v1"
ASRPipeline = pipeline(task="automatic-speech-recognition", model=checkpoint)

def Recognize(audiofile):
    # gr.Audio() returns a (sample_rate, numpy array) tuple by default.
    sr, audio = audiofile
    # Gradio typically delivers integer samples; scale them to float32 in [-1, 1].
    if np.issubdtype(audio.dtype, np.integer):
        audio = audio.astype(np.float32) / np.iinfo(audio.dtype).max
    # Pass the sampling rate so the pipeline can resample for Whisper.
    result = ASRPipeline({"sampling_rate": sr, "raw": audio})
    # The pipeline returns a dict; the Textbox only needs the transcript text.
    return result["text"]

demo = gr.Interface(fn=Recognize, title=checkpoint, inputs=gr.Audio(), outputs=gr.Textbox())

if __name__ == "__main__":
    demo.launch(share=True)