import os

# Install Whisper at startup (Hugging Face Spaces pattern); must run before the import.
os.system("pip install git+https://github.com/openai/whisper.git")

import gradio as gr
import whisper

model = whisper.load_model("small")

# Maps the first word of the dropdown choice (e.g. "English" from "English Text")
# to Whisper's ISO-639-1 language code.
language_id_lookup = {
    "English":    "en",
    "German":     "de",
    "Greek":      "el",
    "Spanish":    "es",
    "Finnish":    "fi",
    "Russian":    "ru",
    "Hungarian":  "hu",
    "Dutch":      "nl",
    "French":     "fr",
    "Polish":     "pl",
    "Portuguese": "pt",
    "Italian":    "it",
}


def predict(audio, mic_audio=None, language="Detect Language"):
    """Transcribe an uploaded or recorded audio clip with Whisper.

    Parameters
    ----------
    audio : str or None
        Filepath of the uploaded audio clip (Gradio ``type="filepath"``).
    mic_audio : str or None
        Filepath of the microphone recording; takes precedence over ``audio``.
    language : str
        Dropdown choice such as ``"English Text"``, or ``"Detect Language"``
        to let Whisper auto-detect.

    Returns
    -------
    tuple[str, str]
        The transcription text and the language code that was used.
    """
    # Prefer the microphone recording when both inputs are supplied.
    if mic_audio is not None:
        input_audio = mic_audio
    elif audio is not None:
        input_audio = audio
    else:
        return "(please provide audio)", ""

    speech = whisper.load_audio(input_audio)
    speech = whisper.pad_or_trim(speech)
    mel = whisper.log_mel_spectrogram(speech).to(model.device)

    # Resolve the language BEFORE decoding so it can steer the decoder.
    if language == "Detect Language":
        # detect_language returns (tokens, probs); the detected language is
        # the key with the highest probability.
        _, probs = model.detect_language(mel)
        out_language = max(probs, key=probs.get)
    else:
        # "English Text" -> "English" -> "en"
        out_language = language_id_lookup[language.split()[0]]

    options = whisper.DecodingOptions(fp16=False, language=out_language)
    result = whisper.decode(model, mel, options)

    print(result.text + " " + out_language)
    return result.text, out_language


title = "Demo for Whisper -> Something -> XLS-R"

description = """
 How to use: Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before being passed into the model. The output is the text transcription of the audio.
"""

gr.Interface(
    fn=predict,
    inputs=[
        gr.Audio(label="Upload Speech", source="upload", type="filepath"),
        gr.Audio(label="Record Speech", source="microphone", type="filepath"),
        gr.Dropdown(
            ['English Text', 'German Text', 'Greek Text', 'Spanish Text',
             'Finnish Text', 'Russian Text', 'Hungarian Text', 'Dutch Text',
             'French Text', 'Polish Text', 'Portuguese Text', 'Italian Text',
             'Detect Language'],
            type="value",
            value='English Text',
            label="Select the Language of the that you are speaking in.",
        ),
    ],
    outputs=[
        # Two components to match predict's two return values.
        gr.Text(label="Transcription"),
        gr.Text(label="Language"),
    ],
    title=title,
    description=description,
).launch()