# NOTE(review): the three lines here ("Spaces:" / "Running" / "Running") were
# Hugging Face Spaces UI status text captured by a page scrape, not Python —
# replaced with this comment so the module parses.
import io
import wave

import numpy as np
import streamlit as st
from st_audiorec import st_audiorec
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
# Load the Whisper feature extractor/tokenizer and the seq2seq model once at
# module import. NOTE(review): this runs on every Streamlit script rerun unless
# wrapped in @st.cache_resource — consider caching to avoid repeated downloads.
processor = AutoProcessor.from_pretrained("openai/whisper-small")
model = AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-small")
def transcribe(audio):
    """Transcribe recorded audio with the module-level Whisper model.

    Args:
        audio: WAV file contents as ``bytes`` (what ``st_audiorec`` returns).

    Returns:
        The transcribed text as a single string.

    Bug fix vs. original: the raw WAV bytes were passed straight to
    ``processor(...)`` and the whole ``BatchFeature`` to ``model.generate`` —
    Whisper's processor needs a float waveform plus its sampling rate, and
    ``generate`` needs the ``input_features`` tensor. Also, ``batch_decode``
    returns a list; we return its single element so the caller shows plain text.
    """
    # Decode the WAV container into raw PCM frames.
    with wave.open(io.BytesIO(audio)) as wav_file:
        sample_rate = wav_file.getframerate()
        n_channels = wav_file.getnchannels()
        frames = wav_file.readframes(wav_file.getnframes())

    # 16-bit PCM -> float32 in [-1, 1], the range the feature extractor expects.
    # assumes 16-bit samples (st_audiorec's output) — TODO confirm
    waveform = np.frombuffer(frames, dtype=np.int16).astype(np.float32) / 32768.0
    if n_channels > 1:
        # Downmix interleaved channels to mono; Whisper expects a 1-D signal.
        waveform = waveform.reshape(-1, n_channels).mean(axis=1)

    # NOTE(review): Whisper models expect 16 kHz input; the processor will
    # complain if sample_rate differs — resample upstream if needed.
    inputs = processor(waveform, sampling_rate=sample_rate, return_tensors="pt")
    generated_ids = model.generate(inputs.input_features, num_beams=4)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
# In-browser audio recorder; returns the recording as WAV bytes, or None
# until the user has recorded something.
wav_audio_data = st_audiorec()
if wav_audio_data is not None:
    st.write("Transcription:")
    st.write(transcribe(wav_audio_data))
# Set up the Streamlit app chrome and the API-key sidebar input.
# NOTE(review): because Streamlit renders in statement order, this title
# appears BELOW the recorder defined above — move this section to the top of
# the script if the title should lead the page.
st.title("Glaswegian Transcription with Whisper")
api_key = st.sidebar.text_input("Enter your API key")
# Check if API key is provided
if api_key:
    # NOTE(review): echoing a secret back to the page is a leak risk —
    # consider confirming receipt without printing the key itself.
    st.write("API key:", api_key)
    # Add your code here to use the Whisper model for audio transcription
else:
    st.warning("Please enter your API key in the sidebar.")