import streamlit as st
import base64
import io
from huggingface_hub import InferenceClient
from gtts import gTTS
from audiorecorder import audiorecorder
import speech_recognition as sr
from pydub import AudioSegment
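
# Voice assistant: record speech in the browser, transcribe it with Google
# Speech Recognition, generate a reply with Mixtral-8x7B-Instruct through the
# Hugging Face InferenceClient, and read the reply aloud with gTTS.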

pre_prompt_text = "You are a behavioral AI, your answers should be brief, stoic and humanistic."

# Conversation history and a flag marking whether the system prompt has already been sent.
if "history" not in st.session_state:
    st.session_state.history = []
if "pre_prompt_sent" not in st.session_state:
    st.session_state.pre_prompt_sent = False

def recognize_speech(audio_data, show_messages=True):
    # Transcribe a WAV file with Google Speech Recognition (Spanish).
    recognizer = sr.Recognizer()
    audio_recording = sr.AudioFile(audio_data)

    with audio_recording as source:
        audio = recognizer.record(source)

    try:
        audio_text = recognizer.recognize_google(audio, language="es-ES")
        if show_messages:
            st.subheader("Recognized text:")
            st.write(audio_text)
            st.success("Completed.")
    except sr.UnknownValueError:
        st.warning("The audio could not be recognized. Did you try to record something?")
        audio_text = ""
    except sr.RequestError:
        st.error("The speech recognition service could not be reached. Please try again.")
        audio_text = ""

    return audio_text

def format_prompt(message, history):
    # Build a Mistral-instruct prompt from the system text, the chat history, and the new message.
    prompt = "<s>"

    if not st.session_state.pre_prompt_sent:
        prompt += f"[INST] {pre_prompt_text} [/INST]"
        st.session_state.pre_prompt_sent = True

    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "

    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(audio_text, history, temperature=None, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
    client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

    temperature = float(temperature) if temperature is not None else 0.9
    temperature = max(temperature, 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42)

    formatted_prompt = format_prompt(audio_text, history)

    # Stream the generation and accumulate the tokens; return_full_text=False keeps the prompt out of the output.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    response = ""
    for response_token in stream:
        response += response_token.token.text

    response = ' '.join(response.split()).replace('</s>', '')
    audio_file = text_to_speech(response, speed=1.3)
    return response, audio_file

def text_to_speech(text, speed=1.3):
    # Synthesize the reply with gTTS and return it as an in-memory MP3 buffer.
    # gTTS does not expose a playback-speed control, so the speed argument is unused;
    # the language matches the es-ES recognition language above.
    try:
        tts = gTTS(text, lang="es")
        audio_fp = io.BytesIO()
        tts.write_to_fp(audio_fp)
        audio_fp.seek(0)
        return audio_fp
    except Exception as e:
        print(f"gTTS error: {str(e)}")
        return None

def main():
    audio_data = audiorecorder("Push to Play", "Stop Recording...")

    if len(audio_data) > 0:
        # Play back the recording, then save it as WAV so SpeechRecognition can read it.
        st.audio(audio_data.export().read(), format="audio/mp3", autoplay=True)
        audio_data.export("audio.wav", format="wav")
        audio_text = recognize_speech("audio.wav")

        if audio_text:
            output, audio_file = generate(audio_text, history=st.session_state.history)
            st.session_state.history.append((audio_text, output))
            if audio_file is not None:
                st.markdown(
                    f"""<audio controls="controls" src="data:audio/mpeg;base64,{base64.b64encode(audio_file.read()).decode()}"></audio>""",
                    unsafe_allow_html=True)

if __name__ == "__main__":
    main()