# -*- coding: utf-8 -*-
import typing
import gradio as gr
import numpy as np
import os
import torch
import torch.nn as nn
import audiofile
from tts import StyleTTS2
from textual import only_greek_or_only_latin, transliterate_number, fix_vocals
import audresample
import textwrap
import nltk
from audionar import VitsModel, VitsTokenizer
from audiocraft import AudioGen

audiogen = AudioGen().eval().to('cpu')

nltk.download('punkt', download_dir='./')
nltk.download('punkt_tab', download_dir='./')
nltk.data.path.append('.')

language_names = ['Ancient greek', 'English', 'Deutsch', 'French',
                  'Hungarian', 'Romanian', 'Serbian (Approx.)']


def audionar_tts(text=None, lang='Romanian', soundscape='', cache_lim=24):
    # https://huggingface.co/dkounadis/artificial-styletts2/blob/main/msinference.py
    lang_map = {
        'ancient greek': 'grc',
        'english': 'eng',
        'deutsch': 'deu',
        'french': 'fra',
        'hungarian': 'hun',
        'romanian': 'ron',
        'serbian (approx.)': 'rmc-script_latin',
    }
    final_audio = None
    if text is None or text.strip() == '':
        text = 'No Audio or Txt Input'

    if lang not in language_names:
        # StyleTTS2: `lang` is a voice name, clone the matching reference wav
        text = only_greek_or_only_latin(text, lang='eng')
        x = _tts.inference(text, ref_s='wav/' + lang + '.wav')[0, 0, :].numpy()  # 24 kHz
        if x.shape[0] > 10:
            x = audresample.resample(signal=x.astype(np.float32),
                                     original_rate=24000,
                                     target_rate=16000)[0, :]  # 16 kHz
    else:
        # VITS: `lang` is a language name, use the matching MMS model
        lang_code = lang_map.get(lang.lower(), lang.lower().split()[0].strip())
        global cached_lang_code, cached_net_g, cached_tokenizer
        if 'cached_lang_code' not in globals() or cached_lang_code != lang_code:
            # (re)load the MMS-VITS model only when the language changes
            cached_lang_code = lang_code
            cached_net_g = VitsModel.from_pretrained(f'facebook/mms-tts-{lang_code}').eval()
            cached_tokenizer = VitsTokenizer.from_pretrained(f'facebook/mms-tts-{lang_code}')
        text = only_greek_or_only_latin(text, lang=lang_code)
        text = transliterate_number(text, lang=lang_code)
        text = fix_vocals(text, lang=lang_code)
        sentences = textwrap.wrap(text, width=439)
        total_audio_parts = []
        for sentence in sentences:
            inputs = cached_tokenizer(sentence, return_tensors='pt')
            with torch.no_grad():
                audio_part = cached_net_g(
                    input_ids=inputs.input_ids,
                    attention_mask=inputs.attention_mask,
                    lang_code=lang_code,
                )[0, :]
            total_audio_parts.append(audio_part)
        x = torch.cat(total_audio_parts).cpu().numpy()

    if soundscape and soundscape.strip():
        speech_duration_secs = len(x) / 16000
        target_duration = max(speech_duration_secs + 0.74, 2.0)
        background_audio = audiogen.generate(
            soundscape,
            duration=target_duration,
            cache_lim=max(4, int(cache_lim))  # enforce a minimum number of autoregressive steps
        ).numpy()
        # pad the shorter signal so speech and background have equal length
        len_speech = len(x)
        len_background = len(background_audio)
        if len_background > len_speech:
            padding = np.zeros(len_background - len_speech, dtype=np.float32)
            x = np.concatenate([x, padding])
        elif len_speech > len_background:
            padding = np.zeros(len_speech - len_background, dtype=np.float32)
            background_audio = np.concatenate([background_audio, padding])
        x = x[None, :]
        background_audio = background_audio[None, :]
        # mix into a 2-channel signal with slightly different speech/background weights per channel
        final_audio = np.concatenate([
            0.49 * x + 0.51 * background_audio,
            0.51 * background_audio + 0.49 * x
        ], 0)
    else:
        final_audio = x

    wavfile = '_vits_.wav'
    audiofile.write(wavfile, final_audio, 16000)
    return wavfile  # wav path for the audio output (also reusable as state, e.g. for an emotion-recognition tab)
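

# Minimal direct-call sketch (optional): exercises audionar_tts() without the Gradio UI.
# The env-var guard, the Romanian prompt and the 'rain' soundscape are illustrative
# assumptions, not part of the original app. Passing a voice name from VOICES (defined
# below) instead would take the StyleTTS2 path, which needs `_tts` defined further down.
if os.environ.get('AUDIONAR_SMOKE_TEST'):
    _demo_wav = audionar_tts('Buna ziua!', lang='Romanian', soundscape='rain', cache_lim=24)
    print('smoke test wrote', _demo_wav)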
of "last tts" # at record fill the state of "last record" and place in list of voice/langs for TTS VOICES = ['jv_ID_google-gmu_04982.wav', 'it_IT_mls_1595.wav', 'en_US_vctk_p303.wav', 'en_US_vctk_p306.wav', 'it_IT_mls_8842.wav', 'en_US_cmu_arctic_ksp.wav', 'jv_ID_google-gmu_05970.wav', 'en_US_vctk_p318.wav', 'ha_NE_openbible.wav', 'ne_NP_ne-google_0883.wav', 'en_US_vctk_p280.wav', 'bn_multi_1010.wav', 'en_US_vctk_p259.wav', 'it_IT_mls_844.wav', 'en_US_vctk_p269.wav', 'en_US_vctk_p285.wav', 'de_DE_m-ailabs_angela_merkel.wav', 'en_US_vctk_p316.wav', 'en_US_vctk_p362.wav', 'jv_ID_google-gmu_06207.wav', 'tn_ZA_google-nwu_9061.wav', 'fr_FR_tom.wav', 'en_US_vctk_p233.wav', 'it_IT_mls_4975.wav', 'en_US_vctk_p236.wav', 'bn_multi_01232.wav', 'bn_multi_5958.wav', 'it_IT_mls_9185.wav', 'en_US_vctk_p248.wav', 'en_US_vctk_p287.wav', 'it_IT_mls_9772.wav', 'te_IN_cmu-indic_sk.wav', 'tn_ZA_google-nwu_8333.wav', 'en_US_vctk_p260.wav', 'en_US_vctk_p247.wav', 'en_US_vctk_p329.wav', 'en_US_cmu_arctic_fem.wav', 'en_US_cmu_arctic_rms.wav', 'en_US_vctk_p308.wav', 'jv_ID_google-gmu_08736.wav', 'en_US_vctk_p245.wav', 'fr_FR_m-ailabs_nadine_eckert_boulet.wav', 'jv_ID_google-gmu_03314.wav', 'en_US_vctk_p239.wav', 'jv_ID_google-gmu_05540.wav', 'it_IT_mls_7440.wav', 'en_US_vctk_p310.wav', 'en_US_vctk_p237.wav', 'en_US_hifi-tts_92.wav', 'en_US_cmu_arctic_aew.wav', 'ne_NP_ne-google_2099.wav', 'en_US_vctk_p226.wav', 'af_ZA_google-nwu_1919.wav', 'jv_ID_google-gmu_03727.wav', 'en_US_vctk_p317.wav', 'tn_ZA_google-nwu_0378.wav', 'nl_pmk.wav', 'en_US_vctk_p286.wav', 'tn_ZA_google-nwu_3342.wav', # 'en_US_vctk_p343.wav', 'de_DE_m-ailabs_ramona_deininger.wav', 'jv_ID_google-gmu_03424.wav', 'en_US_vctk_p341.wav', 'jv_ID_google-gmu_03187.wav', 'ne_NP_ne-google_3960.wav', 'jv_ID_google-gmu_06080.wav', 'ne_NP_ne-google_3997.wav', # 'en_US_vctk_p267.wav', 'en_US_vctk_p240.wav', 'ne_NP_ne-google_5687.wav', 'ne_NP_ne-google_9407.wav', 'jv_ID_google-gmu_05667.wav', 'jv_ID_google-gmu_01519.wav', 'ne_NP_ne-google_7957.wav', 'it_IT_mls_4705.wav', 'ne_NP_ne-google_6329.wav', 'it_IT_mls_1725.wav', 'tn_ZA_google-nwu_8914.wav', 'en_US_ljspeech.wav', 'tn_ZA_google-nwu_4850.wav', 'en_US_vctk_p238.wav', 'en_US_vctk_p302.wav', 'jv_ID_google-gmu_08178.wav', 'en_US_vctk_p313.wav', 'af_ZA_google-nwu_2418.wav', 'bn_multi_00737.wav', 'en_US_vctk_p275.wav', # y 'af_ZA_google-nwu_0184.wav', 'jv_ID_google-gmu_07638.wav', 'ne_NP_ne-google_6587.wav', 'ne_NP_ne-google_0258.wav', 'en_US_vctk_p232.wav', 'en_US_vctk_p336.wav', 'jv_ID_google-gmu_09039.wav', 'en_US_vctk_p312.wav', 'af_ZA_google-nwu_8148.wav', 'en_US_vctk_p326.wav', 'en_US_vctk_p264.wav', 'en_US_vctk_p295.wav', # 'en_US_vctk_p298.wav', 'es_ES_m-ailabs_victor_villarraza.wav', 'pl_PL_m-ailabs_nina_brown.wav', 'tn_ZA_google-nwu_9365.wav', 'en_US_vctk_p294.wav', 'jv_ID_google-gmu_00658.wav', 'jv_ID_google-gmu_08305.wav', 'en_US_vctk_p330.wav', 'gu_IN_cmu-indic_cmu_indic_guj_dp.wav', 'jv_ID_google-gmu_05219.wav', 'en_US_vctk_p284.wav', 'de_DE_m-ailabs_eva_k.wav', # 'bn_multi_00779.wav', 'en_UK_apope.wav', 'en_US_vctk_p345.wav', 'it_IT_mls_6744.wav', 'en_US_vctk_p347.wav', 'en_US_m-ailabs_mary_ann.wav', 'en_US_m-ailabs_elliot_miller.wav', 'en_US_vctk_p279.wav', 'ru_RU_multi_nikolaev.wav', 'bn_multi_4811.wav', 'tn_ZA_google-nwu_7693.wav', 'bn_multi_01701.wav', 'en_US_vctk_p262.wav', # 'en_US_vctk_p266.wav', 'en_US_vctk_p243.wav', 'en_US_vctk_p297.wav', 'en_US_vctk_p278.wav', 'jv_ID_google-gmu_02059.wav', 'en_US_vctk_p231.wav', 'te_IN_cmu-indic_kpn.wav', 'en_US_vctk_p250.wav', 'it_IT_mls_4974.wav', 
    'en_US_cmu_arctic_awbrms.wav',
    # 'en_US_vctk_p263.wav',
    'nl_femal.wav', 'tn_ZA_google-nwu_6116.wav', 'jv_ID_google-gmu_06383.wav', 'en_US_vctk_p225.wav',
    'en_US_vctk_p228.wav', 'it_IT_mls_277.wav', 'tn_ZA_google-nwu_7866.wav', 'en_US_vctk_p300.wav',
    'ne_NP_ne-google_0649.wav', 'es_ES_carlfm.wav', 'jv_ID_google-gmu_06510.wav',
    'de_DE_m-ailabs_rebecca_braunert_plunkett.wav', 'en_US_vctk_p340.wav', 'en_US_cmu_arctic_gka.wav',
    'ne_NP_ne-google_2027.wav', 'jv_ID_google-gmu_09724.wav', 'en_US_vctk_p361.wav', 'ne_NP_ne-google_6834.wav',
    'jv_ID_google-gmu_02326.wav', 'fr_FR_m-ailabs_zeckou.wav', 'tn_ZA_google-nwu_1932.wav',
    # 'female-20-happy.wav',
    'tn_ZA_google-nwu_1483.wav', 'de_DE_thorsten-emotion_amused.wav', 'ru_RU_multi_minaev.wav',
    'sw_lanfrica.wav', 'en_US_vctk_p271.wav', 'tn_ZA_google-nwu_0441.wav', 'it_IT_mls_6001.wav',
    'en_US_vctk_p305.wav', 'it_IT_mls_8828.wav', 'jv_ID_google-gmu_08002.wav', 'it_IT_mls_2033.wav',
    'tn_ZA_google-nwu_3629.wav', 'it_IT_mls_6348.wav', 'en_US_cmu_arctic_axb.wav', 'it_IT_mls_8181.wav',
    'en_US_vctk_p230.wav', 'af_ZA_google-nwu_7214.wav', 'nl_nathalie.wav', 'it_IT_mls_8207.wav',
    'ko_KO_kss.wav', 'af_ZA_google-nwu_6590.wav', 'jv_ID_google-gmu_00264.wav', 'tn_ZA_google-nwu_6234.wav',
    'jv_ID_google-gmu_05522.wav', 'en_US_cmu_arctic_lnh.wav', 'en_US_vctk_p272.wav', 'en_US_cmu_arctic_slp.wav',
    'en_US_vctk_p299.wav', 'en_US_hifi-tts_9017.wav', 'it_IT_mls_4998.wav', 'it_IT_mls_6299.wav',
    'en_US_cmu_arctic_rxr.wav',
    # 'female-46-neutral.wav',
    'jv_ID_google-gmu_01392.wav', 'tn_ZA_google-nwu_8512.wav', 'en_US_vctk_p244.wav',
    # 'bn_multi_3108.wav',
    # 'it_IT_mls_7405.wav',
    # 'bn_multi_3713.wav',
    # 'yo_openbible.wav',
    # 'jv_ID_google-gmu_01932.wav',
    'en_US_vctk_p270.wav', 'tn_ZA_google-nwu_6459.wav', 'bn_multi_4046.wav', 'en_US_vctk_p288.wav',
    'en_US_vctk_p251.wav', 'es_ES_m-ailabs_tux.wav', 'tn_ZA_google-nwu_6206.wav', 'bn_multi_9169.wav',
    # 'en_US_vctk_p293.wav',
    # 'en_US_vctk_p255.wav',
    'af_ZA_google-nwu_8963.wav',
    # 'en_US_vctk_p265.wav',
    'gu_IN_cmu-indic_cmu_indic_guj_ad.wav', 'jv_ID_google-gmu_07335.wav', 'en_US_vctk_p323.wav',
    'en_US_vctk_p281.wav', 'en_US_cmu_arctic_bdl.wav', 'en_US_m-ailabs_judy_bieber.wav', 'it_IT_mls_10446.wav',
    'en_US_vctk_p261.wav', 'en_US_vctk_p292.wav', 'te_IN_cmu-indic_ss.wav', 'en_US_vctk_p311.wav',
    'it_IT_mls_12428.wav', 'en_US_cmu_arctic_aup.wav', 'jv_ID_google-gmu_04679.wav', 'it_IT_mls_4971.wav',
    'en_US_cmu_arctic_ljm.wav', 'fa_haaniye.wav', 'en_US_vctk_p339.wav', 'tn_ZA_google-nwu_7896.wav',
    'en_US_vctk_p253.wav', 'it_IT_mls_5421.wav',
    # 'ne_NP_ne-google_0546.wav',
    'vi_VN_vais1000.wav', 'en_US_vctk_p229.wav', 'en_US_vctk_p254.wav', 'en_US_vctk_p258.wav',
    'it_IT_mls_7936.wav', 'en_US_vctk_p301.wav', 'tn_ZA_google-nwu_0045.wav', 'it_IT_mls_659.wav',
    'tn_ZA_google-nwu_7674.wav', 'it_IT_mls_12804.wav', 'el_GR_rapunzelina.wav', 'en_US_hifi-tts_6097.wav',
    'en_US_vctk_p257.wav', 'jv_ID_google-gmu_07875.wav', 'it_IT_mls_1157.wav', 'it_IT_mls_643.wav',
    'en_US_vctk_p304.wav', 'ru_RU_multi_hajdurova.wav', 'it_IT_mls_8461.wav', 'bn_multi_3958.wav',
    'it_IT_mls_1989.wav', 'en_US_vctk_p249.wav',
    # 'bn_multi_0834.wav',
    'en_US_vctk_p307.wav', 'es_ES_m-ailabs_karen_savage.wav', 'fr_FR_m-ailabs_bernard.wav',
    'en_US_vctk_p252.wav', 'en_US_cmu_arctic_jmk.wav', 'en_US_vctk_p333.wav', 'tn_ZA_google-nwu_4506.wav',
    'ne_NP_ne-google_0283.wav', 'de_DE_m-ailabs_karlsson.wav', 'en_US_cmu_arctic_awb.wav', 'en_US_vctk_p246.wav',
    'en_US_cmu_arctic_clb.wav', 'en_US_vctk_p364.wav', 'nl_flemishguy.wav',
    'en_US_vctk_p276.wav',  # y
    # 'en_US_vctk_p274.wav',
    'fr_FR_m-ailabs_gilles_g_le_blanc.wav', 'it_IT_mls_7444.wav', 'style_o22050.wav', 'en_US_vctk_s5.wav',
    'en_US_vctk_p268.wav', 'it_IT_mls_6807.wav', 'it_IT_mls_2019.wav',
    # 'male-60-angry.wav',
    'af_ZA_google-nwu_8924.wav', 'en_US_vctk_p374.wav', 'en_US_vctk_p363.wav', 'it_IT_mls_644.wav',
    'ne_NP_ne-google_3614.wav', 'en_US_vctk_p241.wav', 'ne_NP_ne-google_3154.wav', 'en_US_vctk_p234.wav',
    'it_IT_mls_8384.wav', 'fr_FR_m-ailabs_ezwa.wav', 'it_IT_mls_5010.wav', 'en_US_vctk_p351.wav',
    'en_US_cmu_arctic_eey.wav', 'jv_ID_google-gmu_04285.wav', 'jv_ID_google-gmu_06941.wav',
    'hu_HU_diana-majlinger.wav', 'tn_ZA_google-nwu_2839.wav', 'bn_multi_03042.wav', 'tn_ZA_google-nwu_5628.wav',
    'it_IT_mls_4649.wav', 'af_ZA_google-nwu_7130.wav', 'en_US_cmu_arctic_slt.wav', 'jv_ID_google-gmu_04175.wav',
    'gu_IN_cmu-indic_cmu_indic_guj_kt.wav', 'jv_ID_google-gmu_00027.wav', 'jv_ID_google-gmu_02884.wav',
    'en_US_vctk_p360.wav', 'en_US_vctk_p334.wav',
    # 'male-27-sad.wav',
    'tn_ZA_google-nwu_1498.wav', 'fi_FI_harri-tapani-ylilammi.wav', 'bn_multi_rm.wav', 'ne_NP_ne-google_2139.wav',
    'pl_PL_m-ailabs_piotr_nater.wav', 'fr_FR_siwis.wav', 'nl_bart-de-leeuw.wav', 'jv_ID_google-gmu_04715.wav',
    'en_US_vctk_p283.wav', 'en_US_vctk_p314.wav', 'en_US_vctk_p335.wav', 'jv_ID_google-gmu_07765.wav',
    'en_US_vctk_p273.wav',
]
VOICES = [t[:-4] for t in VOICES]  # strip '.wav' so the gr.Dropdown shows clean voice names

_tts = StyleTTS2().to('cpu')

with gr.Blocks(theme='huggingface') as demo:
    with gr.Row():
        text_input = gr.Textbox(
            label="Type text for TTS:",
            placeholder="Type Text for TTS",
            lines=4,
            value='Η γρηγορη καφετι αλεπου πηδαει πανω απο τον τεμπελη σκυλο.',
        )
        choice_dropdown = gr.Dropdown(
            choices=language_names + VOICES,
            label="Select Voice or Language",
            value=VOICES[0]
        )
        soundscape_input = gr.Textbox(
            lines=1,
            value="frogs",
            label="AudioGen Txt"
        )
        kv_input = gr.Number(
            label="Sounds diversity",
            value=24,
        )
        generate_button = gr.Button("Generate Audio", variant="primary")
        output_audio = gr.Audio(label="TTS Output")
    generate_button.click(
        fn=audionar_tts,
        inputs=[text_input, choice_dropdown, soundscape_input, kv_input],
        outputs=[output_audio]
    )

demo.launch(debug=True)
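
# Routing notes (comments only; nothing here executes):
# - If the dropdown value is one of language_names, audionar_tts() takes the MMS-VITS
#   path and loads 'facebook/mms-tts-<lang_code>'; any other value is treated as a
#   voice name and the StyleTTS2 path clones 'wav/<voice>.wav'.
# - demo.launch(debug=True) blocks the main thread, so code placed after it would
#   only run once the server shuts down.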