import spaces
from snac import SNAC
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import snapshot_download
from dotenv import load_dotenv
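
# Pull local environment settings (e.g. an HF access token, if one is needed) from .env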
load_dotenv()
# Check if CUDA is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Loading SNAC model...") | |
snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz") | |
snac_model = snac_model.to(device) | |
model_name = "Vyvo/VyvoTTS-LFM2-Multi-Speaker"

# Download only the model config and safetensors weights
snapshot_download(
    repo_id=model_name,
    allow_patterns=[
        "config.json",
        "*.safetensors",
        "model.safetensors.index.json",
    ],
    ignore_patterns=[
        "optimizer.pt",
        "pytorch_model.bin",
        "training_args.bin",
        "scheduler.pt",
        "tokenizer.json",
        "tokenizer_config.json",
        "special_tokens_map.json",
        "vocab.json",
        "merges.txt",
        "tokenizer.*",
    ],
)
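
# Tokenizer files are deliberately excluded from the snapshot above;
# AutoTokenizer below fetches what it needs from the Hub on its own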
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
model.to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)
print(f"Model loaded to {device}")
# LFM2 special-token configuration (the correct values for this model)
TOKENIZER_LENGTH = 64400
START_OF_TEXT = 1
END_OF_TEXT = 7
START_OF_SPEECH = TOKENIZER_LENGTH + 1
END_OF_SPEECH = TOKENIZER_LENGTH + 2
START_OF_HUMAN = TOKENIZER_LENGTH + 3
END_OF_HUMAN = TOKENIZER_LENGTH + 4
START_OF_AI = TOKENIZER_LENGTH + 5
END_OF_AI = TOKENIZER_LENGTH + 6
PAD_TOKEN = TOKENIZER_LENGTH + 7
AUDIO_TOKENS_START = TOKENIZER_LENGTH + 10
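
# Generated audio tokens occupy ids >= AUDIO_TOKENS_START (64410); each SNAC
# frame spans 7 consecutive tokens, and the id offsets are stripped again in
# parse_output / redistribute_codes below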
# Build the prompt token sequence (the format this model expects)
def process_prompt(prompt, voice, tokenizer, device):
    prompt = f"{voice}: {prompt}"
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids

    start_token = torch.tensor([[START_OF_HUMAN]], dtype=torch.int64)
    end_tokens = torch.tensor([[END_OF_TEXT, END_OF_HUMAN]], dtype=torch.int64)
    modified_input_ids = torch.cat([start_token, input_ids, end_tokens], dim=1)

    # No padding needed for a single input
    attention_mask = torch.ones_like(modified_input_ids)
    return modified_input_ids.to(device), attention_mask.to(device)
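
# Illustrative result for voice="Tighnari" (the text ids are hypothetical):
#   [[64403, <ids of "Tighnari: ...">, 7, 64404]]
#   = <START_OF_HUMAN> text <END_OF_TEXT> <END_OF_HUMAN>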
# Parse generated token ids into SNAC audio codes (same token layout as above)
def parse_output(generated_ids):
    token_to_find = START_OF_SPEECH
    token_to_remove = END_OF_SPEECH

    # Keep only the tokens after the last <START_OF_SPEECH>
    token_indices = (generated_ids == token_to_find).nonzero(as_tuple=True)
    if len(token_indices[1]) > 0:
        last_occurrence_idx = token_indices[1][-1].item()
        cropped_tensor = generated_ids[:, last_occurrence_idx + 1:]
    else:
        cropped_tensor = generated_ids

    # Drop any <END_OF_SPEECH> tokens
    processed_rows = []
    for row in cropped_tensor:
        masked_row = row[row != token_to_remove]
        processed_rows.append(masked_row)

    # Trim each row to a multiple of 7 (one SNAC frame = 7 tokens) and shift
    # the ids down by AUDIO_TOKENS_START; the per-position 4096 offsets are
    # removed later in redistribute_codes
    code_lists = []
    for row in processed_rows:
        row_length = row.size(0)
        new_length = (row_length // 7) * 7
        trimmed_row = row[:new_length]
        trimmed_row = [t - AUDIO_TOKENS_START for t in trimmed_row]
        code_lists.append(trimmed_row)

    return code_lists[0]  # Single sample: return the first (only) row
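
# Each 7-token frame maps onto SNAC's three codebook layers (coarse -> fine);
# positions 1-6 carry cumulative 4096 offsets that are subtracted below, e.g.:
#   [c0, c1, c2, c3, c4, c5, c6] ->
#     layer_1: [c0]
#     layer_2: [c1 - 4096, c4 - 4*4096]
#     layer_3: [c2 - 2*4096, c3 - 3*4096, c5 - 5*4096, c6 - 6*4096]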
# Redistribute flat codes into SNAC's three layers (unchanged)
def redistribute_codes(code_list, snac_model):
    device = next(snac_model.parameters()).device  # Device of the SNAC model

    layer_1 = []
    layer_2 = []
    layer_3 = []
    for i in range(len(code_list) // 7):  # parse_output already trimmed to a multiple of 7
        layer_1.append(code_list[7 * i])
        layer_2.append(code_list[7 * i + 1] - 4096)
        layer_3.append(code_list[7 * i + 2] - (2 * 4096))
        layer_3.append(code_list[7 * i + 3] - (3 * 4096))
        layer_2.append(code_list[7 * i + 4] - (4 * 4096))
        layer_3.append(code_list[7 * i + 5] - (5 * 4096))
        layer_3.append(code_list[7 * i + 6] - (6 * 4096))

    # Move the code tensors to the same device as the SNAC model
    codes = [
        torch.tensor(layer_1, device=device).unsqueeze(0),
        torch.tensor(layer_2, device=device).unsqueeze(0),
        torch.tensor(layer_3, device=device).unsqueeze(0),
    ]
    audio_hat = snac_model.decode(codes)
    return audio_hat.detach().squeeze().cpu().numpy()  # Always return a CPU numpy array
# Main generation function
@spaces.GPU  # ZeroGPU Spaces: request a GPU for the duration of this call
def generate_speech(text, voice, temperature, top_p, repetition_penalty, max_new_tokens, progress=gr.Progress()):
    if not text.strip():
        return None

    try:
        progress(0.1, f"Processing text with {voice} voice...")
        input_ids, attention_mask = process_prompt(text, voice, tokenizer, device)

        progress(0.3, "Generating speech tokens...")
        with torch.no_grad():
            generated_ids = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=max_new_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                num_return_sequences=1,
                eos_token_id=END_OF_SPEECH,  # Stop once <END_OF_SPEECH> is emitted
            )

        progress(0.6, "Processing speech tokens...")
        code_list = parse_output(generated_ids)

        progress(0.8, "Converting to audio...")
        audio_samples = redistribute_codes(code_list, snac_model)

        progress(1.0, f"✅ Completed with {voice}!")
        return (24000, audio_samples)  # (sample rate, waveform) for gr.Audio
    except Exception as e:
        print(f"Error generating speech: {e}")
        return None
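
# Hypothetical local sanity check (not wired into the UI); assumes the
# optional soundfile package is installed:
#   sr, wav = generate_speech("Hello from Teyvat!", "Tighnari", 0.6, 0.95, 1.1, 1200)
#   import soundfile as sf; sf.write("sample.wav", wav, sr)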
# Examples for the UI, featuring the Genshin character voices
examples = [
    ["Hey there! I am ready to help you on your adventure in Teyvat.", "Tighnari", 0.6, 0.95, 1.1, 1200],
    ["The wind brings new adventures and ancient secrets to discover.", "Kaeya", 0.7, 0.95, 1.1, 1200],
    ["Let me share the wisdom of the elements with you, traveler.", "Nahida", 0.6, 0.9, 1.2, 1200],
    ["Every journey begins with a single step forward into the unknown.", "Noelle", 0.65, 0.9, 1.1, 1200],
    ["The stars above guide us through even the darkest of nights.", "Furina", 0.7, 0.95, 1.1, 1200],
    ["Together we can explore the mysteries of this vast world.", "Lyney", 0.65, 0.9, 1.15, 1200],
    ["Knowledge is power, but wisdom is knowing how to use it.", "Alhaitham", 0.7, 0.95, 1.1, 1200],
    ["The beauty of Sumeru never fails to take my breath away.", "Collei", 0.6, 0.95, 1.1, 1200],
]
# Available voices: Genshin characters plus a few others
VOICES = [
    "Stephen_Fry",
    "Tighnari",
    "Thoma",
    "Shikanoin_Heizou",
    "Noelle",
    "Ningguang",
    "Nilou",
    "Neuvillette",
    "Navia",
    "Nahida",
    "Mualani",
    "Lyney",
    "Lynette",
    "Layla",
    "Kaveh",
    "Kaeya",
    "Furina",
    "Dehya",
    "Cyno",
    "Collei",
    "Beidou",
    "Alhaitham",
    "Arataki_Itto",
    "Jenny_Voice",
    "Optimus_Prime",
]
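
# Each entry is prepended to the prompt as "<voice>: <text>" in process_prompt;
# the labels presumably match the speaker tags the model saw during fine-tuning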
# Available emotive tags
EMOTIVE_TAGS = ["`<laugh>`", "`<chuckle>`", "`<sigh>`", "`<cough>`", "`<sniffle>`", "`<groan>`", "`<yawn>`", "`<gasp>`"]
# Create the Gradio interface
with gr.Blocks(title="VyvoTTS Multi-Speaker") as demo:
    gr.Markdown(f"""
    # 🎮 VyvoTTS Multi-Speaker
    VyvoTTS is a text-to-speech model by the Vyvo team built on the LFM2 architecture and trained on multiple diverse open-source datasets.
    Since some datasets may contain transcription errors or quality issues, output quality can vary; higher-quality datasets typically produce better speech synthesis.

    **Available Character Voices:**

    🌟 Genshin Impact: Tighnari, Thoma, Heizou, Noelle, Ningguang, Nilou, Neuvillette, Navia, Nahida, Mualani, Lyney, Lynette, Layla, Kaveh, Kaeya, Furina, Dehya, Cyno, Collei, Beidou, Alhaitham, Itto

    🎭 Others: Stephen Fry, Jenny Voice, Optimus Prime

    ## Tips for better prompts:
    - Add paralinguistic elements like {", ".join(EMOTIVE_TAGS)} or `uhm` for more human-like speech.
    - Longer text prompts generally work better than very short phrases.
    - Increasing `repetition_penalty` and `temperature` makes the model speak faster.

    **Note:** Output quality may vary depending on the source dataset quality for each character voice.
    """)
    with gr.Row():
        with gr.Column(scale=3):
            text_input = gr.Textbox(
                label="Text to speak",
                placeholder="Enter your text here...",
                lines=5
            )
            voice = gr.Dropdown(
                choices=VOICES,
                value="Tighnari",
                label="Character Voice"
            )

            with gr.Accordion("Advanced Settings", open=False):
                temperature = gr.Slider(
                    minimum=0.1, maximum=1.5, value=0.6, step=0.05,
                    label="Temperature",
                    info="Higher values (0.7-1.0) create more expressive but less stable speech"
                )
                top_p = gr.Slider(
                    minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                    label="Top P",
                    info="Nucleus sampling threshold"
                )
                repetition_penalty = gr.Slider(
                    minimum=1.0, maximum=2.0, value=1.1, step=0.05,
                    label="Repetition Penalty",
                    info="Higher values discourage repetitive patterns"
                )
                max_new_tokens = gr.Slider(
                    minimum=100, maximum=2000, value=1200, step=100,
                    label="Max Length",
                    info="Maximum length of the generated audio (in tokens)"
                )

            with gr.Row():
                submit_btn = gr.Button("Generate Speech", variant="primary")
                clear_btn = gr.Button("Clear")

        with gr.Column(scale=2):
            audio_output = gr.Audio(label="Generated Speech", type="numpy")
    # Set up examples
    gr.Examples(
        examples=examples,
        inputs=[text_input, voice, temperature, top_p, repetition_penalty, max_new_tokens],
        outputs=audio_output,
        fn=generate_speech,
        cache_examples=True,
    )
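
    # Note: cache_examples=True runs generate_speech once per example at build
    # time and serves the cached audio afterwards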
    # Set up event handlers
    submit_btn.click(
        fn=generate_speech,
        inputs=[text_input, voice, temperature, top_p, repetition_penalty, max_new_tokens],
        outputs=audio_output,
        show_progress=True
    )
    clear_btn.click(
        fn=lambda: (None, None),
        inputs=[],
        outputs=[text_input, audio_output]
    )
# Launch the app
if __name__ == "__main__":
    # queue() is required for gr.Progress updates to stream to the client
    demo.queue().launch(share=False, ssr_mode=False)