import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, VitsModel, pipeline

# Speech-to-text: Whisper via the ASR pipeline, which handles audio file loading and decoding.
asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-medium")

# Text-to-speech: the MMS English VITS checkpoint, used here as a runnable TTS model.
tts_model = VitsModel.from_pretrained("facebook/mms-tts-eng")
tts_tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")

# Chat model: GPT-2 XL run locally for text completion.
gpt2_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-xl")
gpt2_xl_model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-xl")
def voice_chat(user_voice):
    messages = [{"role": "system", "content": "You are a kind helpful assistant."}]

    # Transcribe the recorded audio file to text.
    user_message = asr_pipeline(user_voice)["text"]
    messages.append({"role": "user", "content": user_message})
    print(messages)

    # Utilize gpt2-xl locally for chat completion.
    # GPT-2 has no chat template, so flatten the messages into a plain text prompt.
    prompt = "\n".join(f"{m['role']}: {m['content']}" for m in messages) + "\nassistant:"
    input_ids = gpt2_tokenizer(prompt, return_tensors="pt").input_ids
    with torch.no_grad():
        outputs = gpt2_xl_model.generate(
            input_ids, max_new_tokens=100, pad_token_id=gpt2_tokenizer.eos_token_id
        )
    # Keep only the newly generated tokens as the assistant's reply.
    reply = gpt2_tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True).strip()
    messages.append({"role": "assistant", "content": reply})

    # Synthesize the reply; Gradio's Audio output accepts a (sample_rate, waveform) tuple.
    tts_inputs = tts_tokenizer(reply, return_tensors="pt")
    with torch.no_grad():
        waveform = tts_model(**tts_inputs).waveform
    audio = (tts_model.config.sampling_rate, waveform[0].numpy())

    return reply, audio
text_reply = gr.Textbox(label="ChatGPT Text")
voice_reply = gr.Audio(label="ChatGPT Voice")

gr.Interface(
    title="AI Voice Assistant with ChatGPT AI",
    fn=voice_chat,
    # Gradio 4.x syntax; older releases use source="microphone" instead.
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs=[text_reply, voice_reply],
    live=True,
).launch(debug=True)