# from huggingsound import SpeechRecognitionModel  # only needed for the commented-out huggingsound backend below
from transformers import pipeline
import gradio as gr


# Alternative backend (requires the huggingsound package):
# model = SpeechRecognitionModel("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")

# Spanish wav2vec2 model with a language model, loaded through the
# transformers automatic-speech-recognition pipeline.
pipe = pipeline("automatic-speech-recognition", model="patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")
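# For a single filepath input, the pipeline returns a dict with a "text" key,
# e.g. {"text": "definir variable"}; transcribe() below reads that field.
# Minimal sketch of a direct call (the filename is only an assumption):
#   print(pipe("ejemplo.wav")["text"])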


def transcribe(audio, state=""):    
    #transcriptions_es = model.transcribe([audio])[0]        
    transcriptions_es = pipe(audio)["text"]
    # Algoritmo here
    recomendacion = "definir variable"
    return transcriptions_es, recomendacion
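
# The recommendation step above is still a placeholder. Below is a minimal
# sketch of one possible approach (an assumption, not part of the original
# app): fuzzy-match the transcription against a small catalogue of known
# voice commands using difflib from the standard library.
import difflib

COMANDOS_CONOCIDOS = ["definir función", "definir variable", "definir clase"]

def sugerir_comando(texto, candidatos=COMANDOS_CONOCIDOS):
    """Return the known command closest to the transcribed text, or the text itself."""
    coincidencias = difflib.get_close_matches(texto.lower(), candidatos, n=1, cutoff=0.5)
    return coincidencias[0] if coincidencias else texto

# Usage example: sugerir_comando("definir bariable") returns "definir variable".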

inputs = gr.inputs.Audio(label="Dar click para escuchar tu voz", type="filepath", source="microphone")
output1 = gr.outputs.Textbox(label="Así se ve tu código")
output2 = gr.outputs.Textbox(label="Tal vez quisiste decir:")

title = "Expresate con voz"
description = "Aplicación que ayuda a programar a traves de tu voz"
# Example voice commands; note that Interface examples for an Audio filepath
# input would need to be paths to audio recordings of these phrases.
examples = ['definir función', 'definir variable', 'definir clase']
article = "<a  style='color:#eb9f59;' href = 'https://github.com/gandres-dev/Hackaton-Common-Voice'> Repositorio de la app"
demo = gr.Interface(fn=transcribe, inputs=inputs, outputs=[output1,output2],
                    title=title, description=description, article=article,
                    allow_flagging="never", theme="darkpeach", examples=examples,
                    #live=True
                    )

if __name__ == "__main__":
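    # Assumption: passing share=True to demo.launch() would also expose a
    # temporary public URL, handy when the app is not hosted on Spaces.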
    demo.launch()