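# Gradio voice-cloning demo built on Coqui TTS: the multilingual YourTTS model clones a
# reference voice captured with the microphone or uploaded as a WAV file.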
import gradio as gr
from TTS.api import TTS
import torch
import os
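
# Custom CSS: "#warning" turns the text box red with bold centered text,
# "#alert" restores the plain background once valid text is entered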
css = """
#warning {background-color: #FFCCCB !important}
.feedback label textarea {height: auto !important;
font-size: 22px !important;
font-weight: 800 !important;
text-align: center !important;
color: #801313 !important;
padding: 0px !important}
#alert {background-color: #fff !important}
"""
# ✅ Check CPU/GPU availability before loading models
device = "cuda" if torch.cuda.is_available() else "cpu"

# ✅ Enable GPU inference only when CUDA is available
tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=(device == "cuda"))
zh_tts = TTS(model_name="tts_models/zh-CN/baker/tacotron2-DDC-GST", progress_bar=False, gpu=(device == "cuda"))
de_tts = TTS(model_name="tts_models/de/thorsten/vits", progress_bar=False, gpu=(device == "cuda"))
es_tts = TTS(model_name="tts_models/es/mai/tacotron2-DDC", progress_bar=False, gpu=(device == "cuda"))

# ✅ Move every model onto the selected device
tts.to(device)
zh_tts.to(device)
de_tts.to(device)
es_tts.to(device)
def text_to_speech(text: str, speaker_wav: str, speaker_wav_file: str) -> str:
    # Empty input is reported through the warning box (see show_error below);
    # return None so the Audio output stays empty instead of receiving an error string.
    if not text:
        return None
    return change_aud(text, speaker_wav, speaker_wav_file)
def change_aud(text: str, speaker_wav: str, speaker_wav_file: str) -> str:
    # Prefer the uploaded file when no microphone recording was provided
    if speaker_wav_file and not speaker_wav:
        speaker_wav = speaker_wav_file
    # Guard the extension check: speaker_wav may be None when no reference audio is given
    if speaker_wav and speaker_wav.endswith(".mp3"):
        raise gr.Error("MP3 format not supported. Convert to WAV.")
    file_path = "output.wav"
    try:
        if speaker_wav is not None:
            tts.tts_to_file(text, speaker_wav=speaker_wav, language="en", file_path=file_path)
        else:
            # No reference voice given: fall back to the model's default speaker
            tts.tts_to_file(text, speaker=tts.speakers[0], language="en", file_path=file_path)
        # ✅ Debugging print statement to confirm output generation
        if os.path.exists(file_path):
            print(f"Generated file path: {file_path}, Size: {os.path.getsize(file_path)} bytes")
        else:
            print("Error: Output file was not created.")
        return file_path
    except Exception as e:
        # Surface the failure in the UI instead of returning a plain string to the Audio output
        raise gr.Error(f"Error generating cloned voice: {str(e)}")
def show_error(text: str):
    # Two outputs are wired to this handler: show the warning box only when the
    # text box is empty, and leave the audio output unchanged.
    return gr.update(visible=(text == ""), elem_id="warning", elem_classes="feedback"), gr.update()
title = "Voice-Cloning-Demo"
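# UI helpers: toggle the mic/file inputs and restyle the text box (warning vs. normal)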
def toggle(choice: str):
return (
gr.update(visible=(choice == "mic"), value=None),
gr.update(visible=(choice != "mic"), value=None)
)
def change_color(text_input: str):
return gr.update(elem_id="warning" if len(text_input) == 0 else "alert", autofocus=(len(text_input) == 0))
def clear_color(text_input: str, radio: str, error_box: str):
return gr.update(elem_id="alert"), gr.update(value="mic"), gr.update(visible=False)
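
# ✅ Build the Gradio UI: text box, mic/file reference audio, generated-audio player, and warning box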
with gr.Blocks(css=css + "\nfooter {visibility: hidden}", title=title) as demo:
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(label="Input the text", value="", max_lines=4, lines=4)
            radio = gr.Radio(["mic", "file"], value="mic", label="How would you like to upload your audio?")
            audio_input_mic = gr.Audio(label="Voice to clone", sources=["microphone"], type="filepath", visible=True)
            audio_input_file = gr.Audio(label="Voice to clone", sources=["upload"], type="filepath", visible=False)
            with gr.Row():
                with gr.Column():
                    btn_clear = gr.ClearButton([text_input, radio, audio_input_mic, audio_input_file])
                with gr.Column():
                    btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            audio_output = gr.Audio(label="Output", visible=True, autoplay=True, show_share_button=False)
            error_box = gr.Textbox(label="WARNING", value="Input box cannot be blank!!", visible=False, container=True)
    btn_clear.add(audio_output)

    # Event wiring: synthesis, empty-input warning, mic/file toggle, and clear/reset
    btn.click(text_to_speech, inputs=[text_input, audio_input_mic, audio_input_file], outputs=audio_output)
    btn.click(show_error, text_input, [error_box, audio_output])
    radio.change(toggle, radio, [audio_input_mic, audio_input_file])
    btn_clear.click(clear_color, [text_input, radio, error_box], [text_input, radio, error_box])
    btn.click(change_color, text_input, text_input)

demo.launch()