diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..eadb86287b934ae2df85019088299651f72b0271 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +/data/your_dataset/ +/data/your_training_dataset/ +/data/your_training_dataset_non_preprocessed/ +/ckpts/your_training_dataset/ +/preprocess \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..1f572cc141aefa1e6f11c8fe48bd646e5b4d73f1 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "src/third_party/BigVGAN"] + path = src/third_party/BigVGAN + url = https://github.com/NVIDIA/BigVGAN.git diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..021669c0a298a678e3498c32782103c4f6f9629c --- /dev/null +++ b/Dockerfile @@ -0,0 +1,26 @@ +FROM pytorch/pytorch:2.4.0-cuda12.4-cudnn9-devel + +USER root + +ARG DEBIAN_FRONTEND=noninteractive + +LABEL github_repo="https://github.com/SWivid/F5-TTS" + +RUN set -x \ + && apt-get update \ + && apt-get -y install wget curl man git less openssl libssl-dev unzip unar build-essential aria2 tmux vim \ + && apt-get install -y openssh-server sox libsox-fmt-all libsox-fmt-mp3 libsndfile1-dev ffmpeg \ + && apt-get install -y librdmacm1 libibumad3 librdmacm-dev libibverbs1 libibverbs-dev ibverbs-utils ibverbs-providers \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get clean + +WORKDIR /workspace + +RUN git clone https://github.com/SWivid/F5-TTS.git \ + && cd F5-TTS \ + && git submodule update --init --recursive \ + && pip install -e . --no-cache-dir + +ENV SHELL=/bin/bash + +WORKDIR /workspace/F5-TTS diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..98230507a05c8577744b5e6445895712fe285eff --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Yushen CHEN + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
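For reference, a typical build-and-run sequence for the Dockerfile above might look like the sketch below. It assumes Docker and the NVIDIA Container Toolkit are installed on the host, and note that the image clones the upstream SWivid/F5-TTS repository rather than this fork:

```bash
# Build the image defined by the Dockerfile above
docker build -t f5-tts .

# Run it with GPU access (requires the NVIDIA Container Toolkit on the host)
docker run --rm --gpus all -it f5-tts
```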
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ffad9b1bb01e4a36eb4b8dec22893e2efe4bbe4
--- /dev/null
+++ b/app.py
@@ -0,0 +1,77 @@
+from flask import Flask, request, send_file, render_template
+import subprocess
+import os
+import sys
+from datetime import datetime
+from io import BytesIO
+
+app = Flask(__name__)
+
+def run_f5_tts(ref_audio_path, ref_text, gen_text, model="F5TTS_Base", speed=1.2, vocoder_name="vocos"):
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    infer_cli_path = os.path.join(current_dir, "src", "f5_tts", "infer", "infer_cli.py")
+    vocab_file = os.path.join(current_dir, "F5-TTS-MRSU", "vocab.txt")
+    ckpt_file = os.path.join(current_dir, "F5-TTS-MRSU", "model_last.pt")
+    tests_dir = os.path.join(current_dir, "tests")
+
+    os.environ['PYTHONIOENCODING'] = 'utf-8'
+
+    # Increased speed for experimentation (tune it according to the docs)
+    command = [
+        sys.executable,
+        infer_cli_path,
+        "--model", model,
+        "--ref_audio", ref_audio_path,
+        "--ref_text", ref_text,
+        "--gen_text", gen_text,
+        "--speed", str(speed),  # speed raised to 1.2
+        "--vocoder_name", vocoder_name,
+        "--vocab_file", vocab_file,
+        "--ckpt_file", ckpt_file
+    ]
+
+    try:
+        result = subprocess.run(
+            command,
+            check=True,
+            capture_output=True,
+            text=True,
+            encoding='utf-8'
+        )
+        if os.path.exists(tests_dir):
+            wav_files = [f for f in os.listdir(tests_dir) if f.endswith('.wav')]
+            if wav_files:
+                latest_wav = max(wav_files, key=lambda x: os.path.getmtime(os.path.join(tests_dir, x)))
+                output_file = os.path.join(tests_dir, latest_wav)
+                return True, output_file
+        return False, "No audio file found in the tests directory"
+    except subprocess.CalledProcessError as e:
+        return False, e.stderr
+    except Exception as e:
+        return False, str(e)
+
+@app.route('/')
+def home():
+    return render_template('index.html')
+
+@app.route('/api/generate', methods=['POST'])
+def generate_speech():
+    ref_audio = request.files['ref_audio']
+    ref_text = request.form['ref_text']
+    gen_text = request.form['gen_text']
+    model = request.form.get('model', 'F5TTS_Base')
+    speed = float(request.form.get('speed', 1.2))  # default to the increased speed
+
+    ref_audio_path = 'temp_ref.wav'
+    ref_audio.save(ref_audio_path)
+
+    success, result = run_f5_tts(ref_audio_path, ref_text, gen_text, model, speed)
+    os.remove(ref_audio_path)
+
+    if success:
+        return send_file(result, mimetype='audio/wav')
+    else:
+        return result, 500
+
+if __name__ == "__main__":
+    app.run(debug=True)
\ No newline at end of file
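With `app.py` running locally (Flask listens on `http://127.0.0.1:5000` by default), the `/api/generate` endpoint can be exercised with a multipart POST. A minimal sketch, assuming a reference clip named `ref.wav` in the current directory and reusing the reference/generation text from `infer.sh`:

```bash
curl -X POST http://127.0.0.1:5000/api/generate \
  -F "ref_audio=@ref.wav" \
  -F "ref_text=cả hai bên hãy cố gắng hiểu cho nhau" \
  -F "gen_text=mình muốn ra nước ngoài để tiếp xúc nhiều công ty lớn, sau đó mang những gì học được về việt nam giúp xây dựng các công trình tốt hơn" \
  -F "model=F5TTS_Base" \
  -F "speed=1.2" \
  -o generated.wav
```

On success the response body is the synthesized `audio/wav`; on failure the server returns the error text with HTTP status 500.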
diff --git a/check_vocab_pretrained.py b/check_vocab_pretrained.py
new file mode 100644
index 0000000000000000000000000000000000000000..da28d5dd9c4a7f8448bcd11034f332b8322c8094
--- /dev/null
+++ b/check_vocab_pretrained.py
@@ -0,0 +1,68 @@
+"""
+Check the vocab before pretraining or fine-tuning a large model (LLM or speech).
+Goal: make sure the vocab fully covers the tokens of Vietnamese.
+"""
+
+import os
+
+# Vocab file paths
+PRETRAINED_VOCAB_PATH = "data/Emilia_ZH_EN_pinyin/vocab.txt"
+DATASET_VOCAB_PATH = "data/your_training_dataset/vocab_your_dataset.txt"
+OUTPUT_VOCAB_PATH = "data/your_training_dataset/vocab.txt"
+
+
+def load_vocab(file_path: str) -> list:
+    """
+    Read the list of tokens from a vocab file.
+
+    Args:
+        file_path (str): Path to the vocab file.
+
+    Returns:
+        list: the tokens in the file.
+    """
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"File does not exist: {file_path}")
+
+    with open(file_path, "r", encoding="utf8") as file:
+        return [line.replace("\n", "") for line in file]
+
+
+def save_vocab(file_path: str, vocab: list):
+    """
+    Save a list of tokens to a vocab file.
+
+    Args:
+        file_path (str): Output file path.
+        vocab (list): Tokens to save.
+    """
+    with open(file_path, "w", encoding="utf8") as file:
+        file.writelines(f"{token}\n" for token in vocab)
+
+
+def process_vocab():
+    """
+    Check the pretrained vocab and extend it if needed.
+    """
+    # Load both vocabs from file
+    tokens_pretrained = load_vocab(PRETRAINED_VOCAB_PATH)
+    tokens_your_dataset = load_vocab(DATASET_VOCAB_PATH)
+
+    # Find tokens that are in the dataset but missing from the pretrained vocab
+    tokens_missing = []
+
+    for token in tokens_your_dataset:
+        if token not in tokens_pretrained:
+            tokens_missing.append(token)
+
+    print(f"Number of tokens missing from the pretrained vocab: {len(tokens_missing)}")
+
+    # Build the new vocab and save it
+    new_vocab = tokens_pretrained + list(tokens_missing)
+    save_vocab(OUTPUT_VOCAB_PATH, new_vocab)
+
+    print(f"New vocab saved to {OUTPUT_VOCAB_PATH}, total tokens: {len(new_vocab)}")
+
+
+if __name__ == "__main__":
+    process_vocab()
\ No newline at end of file
diff --git a/convert_sr.py b/convert_sr.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb930d11bb8641c9480ec8cc8411a5df2c8e7b0a
--- /dev/null
+++ b/convert_sr.py
@@ -0,0 +1,58 @@
+import glob
+import os
+import subprocess
+from multiprocessing import Pool
+from pathlib import Path
+from shutil import move
+
+from tqdm import tqdm
+
+
+def convert_sr(audio_path: str) -> None:
+    """
+    Convert the sample rate of an audio file to 24kHz.
+    """
+    audio_path = Path(audio_path)
+    output_path = audio_path.with_name(f"{audio_path.stem}_24k.wav")
+    subprocess.run(
+        ["sox", str(audio_path), "-r", "24000", "-c", "1", str(output_path)],
+        check=True
+    )
+
+
+def remove_original(audio_path: str) -> None:
+    """
+    Delete the original file if it is not one that has already been converted to 24kHz.
+    """
+    if "_24k.wav" not in audio_path:
+        os.remove(audio_path)
+
+
+def rename_audio(audio_path: str) -> None:
+    """
+    Strip the '_24k' suffix from the file name to restore the standard name.
+    """
+    audio_path = Path(audio_path)
+    new_path = audio_path.with_name(audio_path.stem.replace("_24k", "") + ".wav")
+    move(audio_path, new_path)
+
+
+def process_audio_files(function, wav_paths):
+    """
+    Apply the given function to the audio files using a pool of worker processes.
+    """
+    with Pool(processes=16) as pool:
+        list(tqdm(pool.imap(function, wav_paths), total=len(wav_paths)))
+
+
+if __name__ == "__main__":
+    dataset_path = "data/your_dataset/*.wav"
+
+    # Convert the sample rate
+    process_audio_files(convert_sr, glob.glob(dataset_path))
+
+    # Remove the original files
+    process_audio_files(remove_original, glob.glob(dataset_path))
+
+    # Rename the converted files
+    process_audio_files(rename_audio, glob.glob(dataset_path))
\ No newline at end of file
diff --git a/data/Emilia_ZH_EN_pinyin/vocab.txt b/data/Emilia_ZH_EN_pinyin/vocab.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a30a90c12e1ab38b95c97770d5c5cd1d03c392e2
--- /dev/null
+++ b/data/Emilia_ZH_EN_pinyin/vocab.txt
@@ -0,0 +1,2545 @@
+
+!
+"
+#
+$
+%
+&
+'
+(
+)
+*
++
+,
+-
+.
+/
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+:
+;
+=
+>
+?
+@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +_ +a +a1 +ai1 +ai2 +ai3 +ai4 +an1 +an3 +an4 +ang1 +ang2 +ang4 +ao1 +ao2 +ao3 +ao4 +b +ba +ba1 +ba2 +ba3 +ba4 +bai1 +bai2 +bai3 +bai4 +ban1 +ban2 +ban3 +ban4 +bang1 +bang2 +bang3 +bang4 +bao1 +bao2 +bao3 +bao4 +bei +bei1 +bei2 +bei3 +bei4 +ben1 +ben2 +ben3 +ben4 +beng +beng1 +beng2 +beng3 +beng4 +bi1 +bi2 +bi3 +bi4 +bian1 +bian2 +bian3 +bian4 +biao1 +biao2 +biao3 +bie1 +bie2 +bie3 +bie4 +bin1 +bin4 +bing1 +bing2 +bing3 +bing4 +bo +bo1 +bo2 +bo3 +bo4 +bu2 +bu3 +bu4 +c +ca1 +cai1 +cai2 +cai3 +cai4 +can1 +can2 +can3 +can4 +cang1 +cang2 +cao1 +cao2 +cao3 +ce4 +cen1 +cen2 +ceng1 +ceng2 +ceng4 +cha1 +cha2 +cha3 +cha4 +chai1 +chai2 +chan1 +chan2 +chan3 +chan4 +chang1 +chang2 +chang3 +chang4 +chao1 +chao2 +chao3 +che1 +che2 +che3 +che4 +chen1 +chen2 +chen3 +chen4 +cheng1 +cheng2 +cheng3 +cheng4 +chi1 +chi2 +chi3 +chi4 +chong1 +chong2 +chong3 +chong4 +chou1 +chou2 +chou3 +chou4 +chu1 +chu2 +chu3 +chu4 +chua1 +chuai1 +chuai2 +chuai3 +chuai4 +chuan1 +chuan2 +chuan3 +chuan4 +chuang1 +chuang2 +chuang3 +chuang4 +chui1 +chui2 +chun1 +chun2 +chun3 +chuo1 +chuo4 +ci1 +ci2 +ci3 +ci4 +cong1 +cong2 +cou4 +cu1 +cu4 +cuan1 +cuan2 +cuan4 +cui1 +cui3 +cui4 +cun1 +cun2 +cun4 +cuo1 +cuo2 +cuo4 +d +da +da1 +da2 +da3 +da4 +dai1 +dai2 +dai3 +dai4 +dan1 +dan2 +dan3 +dan4 +dang1 +dang2 +dang3 +dang4 +dao1 +dao2 +dao3 +dao4 +de +de1 +de2 +dei3 +den4 +deng1 +deng2 +deng3 +deng4 +di1 +di2 +di3 +di4 +dia3 +dian1 +dian2 +dian3 +dian4 +diao1 +diao3 +diao4 +die1 +die2 +die4 +ding1 +ding2 +ding3 +ding4 +diu1 +dong1 +dong3 +dong4 +dou1 +dou2 +dou3 +dou4 +du1 +du2 +du3 +du4 +duan1 +duan2 +duan3 +duan4 +dui1 +dui4 +dun1 +dun3 +dun4 +duo1 +duo2 +duo3 +duo4 +e +e1 +e2 +e3 +e4 +ei2 +en1 +en4 +er +er2 +er3 +er4 +f +fa1 +fa2 +fa3 +fa4 +fan1 +fan2 +fan3 +fan4 +fang1 +fang2 +fang3 +fang4 +fei1 +fei2 +fei3 +fei4 +fen1 +fen2 +fen3 +fen4 +feng1 +feng2 +feng3 +feng4 +fo2 +fou2 +fou3 +fu1 +fu2 +fu3 +fu4 +g +ga1 +ga2 +ga3 +ga4 +gai1 +gai2 +gai3 +gai4 +gan1 +gan2 +gan3 +gan4 +gang1 +gang2 +gang3 +gang4 +gao1 +gao2 +gao3 +gao4 +ge1 +ge2 +ge3 +ge4 +gei2 +gei3 +gen1 +gen2 +gen3 +gen4 +geng1 +geng3 +geng4 +gong1 +gong3 +gong4 +gou1 +gou2 +gou3 +gou4 +gu +gu1 +gu2 +gu3 +gu4 +gua1 +gua2 +gua3 +gua4 +guai1 +guai2 +guai3 +guai4 +guan1 +guan2 +guan3 +guan4 +guang1 +guang2 +guang3 +guang4 +gui1 +gui2 +gui3 +gui4 +gun3 +gun4 +guo1 +guo2 +guo3 +guo4 +h +ha1 +ha2 +ha3 +hai1 +hai2 +hai3 +hai4 +han1 +han2 +han3 +han4 +hang1 +hang2 +hang4 +hao1 +hao2 +hao3 +hao4 +he1 +he2 +he4 +hei1 +hen2 +hen3 +hen4 +heng1 +heng2 +heng4 +hong1 +hong2 +hong3 +hong4 +hou1 +hou2 +hou3 +hou4 +hu1 +hu2 +hu3 +hu4 +hua1 +hua2 +hua4 +huai2 +huai4 +huan1 +huan2 +huan3 +huan4 +huang1 +huang2 +huang3 +huang4 +hui1 +hui2 +hui3 +hui4 +hun1 +hun2 +hun4 +huo +huo1 +huo2 +huo3 +huo4 +i +j +ji1 +ji2 +ji3 +ji4 +jia +jia1 +jia2 +jia3 +jia4 +jian1 +jian2 +jian3 +jian4 +jiang1 +jiang2 +jiang3 +jiang4 +jiao1 +jiao2 +jiao3 +jiao4 +jie1 +jie2 +jie3 +jie4 +jin1 +jin2 +jin3 +jin4 +jing1 +jing2 +jing3 +jing4 +jiong3 +jiu1 +jiu2 +jiu3 +jiu4 +ju1 +ju2 +ju3 +ju4 +juan1 +juan2 +juan3 +juan4 +jue1 +jue2 +jue4 +jun1 +jun4 +k +ka1 +ka2 +ka3 +kai1 +kai2 +kai3 +kai4 +kan1 +kan2 +kan3 +kan4 +kang1 +kang2 +kang4 +kao1 +kao2 +kao3 +kao4 +ke1 +ke2 +ke3 +ke4 +ken3 +keng1 +kong1 +kong3 +kong4 +kou1 +kou2 +kou3 +kou4 +ku1 +ku2 +ku3 +ku4 +kua1 +kua3 +kua4 +kuai3 +kuai4 +kuan1 +kuan2 +kuan3 +kuang1 +kuang2 +kuang4 +kui1 +kui2 +kui3 +kui4 +kun1 +kun3 +kun4 +kuo4 +l +la +la1 +la2 +la3 +la4 +lai2 +lai4 +lan2 +lan3 +lan4 +lang1 +lang2 
+lang3 +lang4 +lao1 +lao2 +lao3 +lao4 +le +le1 +le4 +lei +lei1 +lei2 +lei3 +lei4 +leng1 +leng2 +leng3 +leng4 +li +li1 +li2 +li3 +li4 +lia3 +lian2 +lian3 +lian4 +liang2 +liang3 +liang4 +liao1 +liao2 +liao3 +liao4 +lie1 +lie2 +lie3 +lie4 +lin1 +lin2 +lin3 +lin4 +ling2 +ling3 +ling4 +liu1 +liu2 +liu3 +liu4 +long1 +long2 +long3 +long4 +lou1 +lou2 +lou3 +lou4 +lu1 +lu2 +lu3 +lu4 +luan2 +luan3 +luan4 +lun1 +lun2 +lun4 +luo1 +luo2 +luo3 +luo4 +lv2 +lv3 +lv4 +lve3 +lve4 +m +ma +ma1 +ma2 +ma3 +ma4 +mai2 +mai3 +mai4 +man1 +man2 +man3 +man4 +mang2 +mang3 +mao1 +mao2 +mao3 +mao4 +me +mei2 +mei3 +mei4 +men +men1 +men2 +men4 +meng +meng1 +meng2 +meng3 +meng4 +mi1 +mi2 +mi3 +mi4 +mian2 +mian3 +mian4 +miao1 +miao2 +miao3 +miao4 +mie1 +mie4 +min2 +min3 +ming2 +ming3 +ming4 +miu4 +mo1 +mo2 +mo3 +mo4 +mou1 +mou2 +mou3 +mu2 +mu3 +mu4 +n +n2 +na1 +na2 +na3 +na4 +nai2 +nai3 +nai4 +nan1 +nan2 +nan3 +nan4 +nang1 +nang2 +nang3 +nao1 +nao2 +nao3 +nao4 +ne +ne2 +ne4 +nei3 +nei4 +nen4 +neng2 +ni1 +ni2 +ni3 +ni4 +nian1 +nian2 +nian3 +nian4 +niang2 +niang4 +niao2 +niao3 +niao4 +nie1 +nie4 +nin2 +ning2 +ning3 +ning4 +niu1 +niu2 +niu3 +niu4 +nong2 +nong4 +nou4 +nu2 +nu3 +nu4 +nuan3 +nuo2 +nuo4 +nv2 +nv3 +nve4 +o +o1 +o2 +ou1 +ou2 +ou3 +ou4 +p +pa1 +pa2 +pa4 +pai1 +pai2 +pai3 +pai4 +pan1 +pan2 +pan4 +pang1 +pang2 +pang4 +pao1 +pao2 +pao3 +pao4 +pei1 +pei2 +pei4 +pen1 +pen2 +pen4 +peng1 +peng2 +peng3 +peng4 +pi1 +pi2 +pi3 +pi4 +pian1 +pian2 +pian4 +piao1 +piao2 +piao3 +piao4 +pie1 +pie2 +pie3 +pin1 +pin2 +pin3 +pin4 +ping1 +ping2 +po1 +po2 +po3 +po4 +pou1 +pu1 +pu2 +pu3 +pu4 +q +qi1 +qi2 +qi3 +qi4 +qia1 +qia3 +qia4 +qian1 +qian2 +qian3 +qian4 +qiang1 +qiang2 +qiang3 +qiang4 +qiao1 +qiao2 +qiao3 +qiao4 +qie1 +qie2 +qie3 +qie4 +qin1 +qin2 +qin3 +qin4 +qing1 +qing2 +qing3 +qing4 +qiong1 +qiong2 +qiu1 +qiu2 +qiu3 +qu1 +qu2 +qu3 +qu4 +quan1 +quan2 +quan3 +quan4 +que1 +que2 +que4 +qun2 +r +ran2 +ran3 +rang1 +rang2 +rang3 +rang4 +rao2 +rao3 +rao4 +re2 +re3 +re4 +ren2 +ren3 +ren4 +reng1 +reng2 +ri4 +rong1 +rong2 +rong3 +rou2 +rou4 +ru2 +ru3 +ru4 +ruan2 +ruan3 +rui3 +rui4 +run4 +ruo4 +s +sa1 +sa2 +sa3 +sa4 +sai1 +sai4 +san1 +san2 +san3 +san4 +sang1 +sang3 +sang4 +sao1 +sao2 +sao3 +sao4 +se4 +sen1 +seng1 +sha1 +sha2 +sha3 +sha4 +shai1 +shai2 +shai3 +shai4 +shan1 +shan3 +shan4 +shang +shang1 +shang3 +shang4 +shao1 +shao2 +shao3 +shao4 +she1 +she2 +she3 +she4 +shei2 +shen1 +shen2 +shen3 +shen4 +sheng1 +sheng2 +sheng3 +sheng4 +shi +shi1 +shi2 +shi3 +shi4 +shou1 +shou2 +shou3 +shou4 +shu1 +shu2 +shu3 +shu4 +shua1 +shua2 +shua3 +shua4 +shuai1 +shuai3 +shuai4 +shuan1 +shuan4 +shuang1 +shuang3 +shui2 +shui3 +shui4 +shun3 +shun4 +shuo1 +shuo4 +si1 +si2 +si3 +si4 +song1 +song3 +song4 +sou1 +sou3 +sou4 +su1 +su2 +su4 +suan1 +suan4 +sui1 +sui2 +sui3 +sui4 +sun1 +sun3 +suo +suo1 +suo2 +suo3 +t +ta1 +ta2 +ta3 +ta4 +tai1 +tai2 +tai4 +tan1 +tan2 +tan3 +tan4 +tang1 +tang2 +tang3 +tang4 +tao1 +tao2 +tao3 +tao4 +te4 +teng2 +ti1 +ti2 +ti3 +ti4 +tian1 +tian2 +tian3 +tiao1 +tiao2 +tiao3 +tiao4 +tie1 +tie2 +tie3 +tie4 +ting1 +ting2 +ting3 +tong1 +tong2 +tong3 +tong4 +tou +tou1 +tou2 +tou4 +tu1 +tu2 +tu3 +tu4 +tuan1 +tuan2 +tui1 +tui2 +tui3 +tui4 +tun1 +tun2 +tun4 +tuo1 +tuo2 +tuo3 +tuo4 +u +v +w +wa +wa1 +wa2 +wa3 +wa4 +wai1 +wai3 +wai4 +wan1 +wan2 +wan3 +wan4 +wang1 +wang2 +wang3 +wang4 +wei1 +wei2 +wei3 +wei4 +wen1 +wen2 +wen3 +wen4 +weng1 +weng4 +wo1 +wo2 +wo3 +wo4 +wu1 +wu2 +wu3 +wu4 +x +xi1 +xi2 +xi3 +xi4 +xia1 +xia2 +xia4 +xian1 +xian2 +xian3 +xian4 +xiang1 +xiang2 +xiang3 +xiang4 +xiao1 +xiao2 +xiao3 +xiao4 +xie1 +xie2 +xie3 +xie4 +xin1 +xin2 
+xin4 +xing1 +xing2 +xing3 +xing4 +xiong1 +xiong2 +xiu1 +xiu3 +xiu4 +xu +xu1 +xu2 +xu3 +xu4 +xuan1 +xuan2 +xuan3 +xuan4 +xue1 +xue2 +xue3 +xue4 +xun1 +xun2 +xun4 +y +ya +ya1 +ya2 +ya3 +ya4 +yan1 +yan2 +yan3 +yan4 +yang1 +yang2 +yang3 +yang4 +yao1 +yao2 +yao3 +yao4 +ye1 +ye2 +ye3 +ye4 +yi +yi1 +yi2 +yi3 +yi4 +yin1 +yin2 +yin3 +yin4 +ying1 +ying2 +ying3 +ying4 +yo1 +yong1 +yong2 +yong3 +yong4 +you1 +you2 +you3 +you4 +yu1 +yu2 +yu3 +yu4 +yuan1 +yuan2 +yuan3 +yuan4 +yue1 +yue4 +yun1 +yun2 +yun3 +yun4 +z +za1 +za2 +za3 +zai1 +zai3 +zai4 +zan1 +zan2 +zan3 +zan4 +zang1 +zang4 +zao1 +zao2 +zao3 +zao4 +ze2 +ze4 +zei2 +zen3 +zeng1 +zeng4 +zha1 +zha2 +zha3 +zha4 +zhai1 +zhai2 +zhai3 +zhai4 +zhan1 +zhan2 +zhan3 +zhan4 +zhang1 +zhang2 +zhang3 +zhang4 +zhao1 +zhao2 +zhao3 +zhao4 +zhe +zhe1 +zhe2 +zhe3 +zhe4 +zhen1 +zhen2 +zhen3 +zhen4 +zheng1 +zheng2 +zheng3 +zheng4 +zhi1 +zhi2 +zhi3 +zhi4 +zhong1 +zhong2 +zhong3 +zhong4 +zhou1 +zhou2 +zhou3 +zhou4 +zhu1 +zhu2 +zhu3 +zhu4 +zhua1 +zhua2 +zhua3 +zhuai1 +zhuai3 +zhuai4 +zhuan1 +zhuan2 +zhuan3 +zhuan4 +zhuang1 +zhuang4 +zhui1 +zhui4 +zhun1 +zhun2 +zhun3 +zhuo1 +zhuo2 +zi +zi1 +zi2 +zi3 +zi4 +zong1 +zong2 +zong3 +zong4 +zou1 +zou2 +zou3 +zou4 +zu1 +zu2 +zu3 +zuan1 +zuan3 +zuan4 +zui2 +zui3 +zui4 +zun1 +zuo +zuo1 +zuo2 +zuo3 +zuo4 +{ +~ +¡ +¢ +£ +¥ +§ +¨ +© +« +® +¯ +° +± +² +³ +´ +µ +· +¹ +º +» +¼ +½ +¾ +¿ +À +Á + +à +Ä +Å +Æ +Ç +È +É +Ê +Í +Î +Ñ +Ó +Ö +× +Ø +Ú +Ü +Ý +Þ +ß +à +á +â +ã +ä +å +æ +ç +è +é +ê +ë +ì +í +î +ï +ð +ñ +ò +ó +ô +õ +ö +ø +ù +ú +û +ü +ý +Ā +ā +ă +ą +ć +Č +č +Đ +đ +ē +ė +ę +ě +ĝ +ğ +ħ +ī +į +İ +ı +Ł +ł +ń +ņ +ň +ŋ +Ō +ō +ő +œ +ř +Ś +ś +Ş +ş +Š +š +Ť +ť +ũ +ū +ź +Ż +ż +Ž +ž +ơ +ư +ǎ +ǐ +ǒ +ǔ +ǚ +ș +ț +ɑ +ɔ +ɕ +ə +ɛ +ɜ +ɡ +ɣ +ɪ +ɫ +ɴ +ɹ +ɾ +ʃ +ʊ +ʌ +ʒ +ʔ +ʰ +ʷ +ʻ +ʾ +ʿ +ˈ +ː +˙ +˜ +ˢ +́ +̅ +Α +Β +Δ +Ε +Θ +Κ +Λ +Μ +Ξ +Π +Σ +Τ +Φ +Χ +Ψ +Ω +ά +έ +ή +ί +α +β +γ +δ +ε +ζ +η +θ +ι +κ +λ +μ +ν +ξ +ο +π +ρ +ς +σ +τ +υ +φ +χ +ψ +ω +ϊ +ό +ύ +ώ +ϕ +ϵ +Ё +А +Б +В +Г +Д +Е +Ж +З +И +Й +К +Л +М +Н +О +П +Р +С +Т +У +Ф +Х +Ц +Ч +Ш +Щ +Ы +Ь +Э +Ю +Я +а +б +в +г +д +е +ж +з +и +й +к +л +м +н +о +п +р +с +т +у +ф +х +ц +ч +ш +щ +ъ +ы +ь +э +ю +я +ё +і +ְ +ִ +ֵ +ֶ +ַ +ָ +ֹ +ּ +־ +ׁ +א +ב +ג +ד +ה +ו +ז +ח +ט +י +כ +ל +ם +מ +ן +נ +ס +ע +פ +ק +ר +ש +ת +أ +ب +ة +ت +ج +ح +د +ر +ز +س +ص +ط +ع +ق +ك +ل +م +ن +ه +و +ي +َ +ُ +ِ +ْ +ก +ข +ง +จ +ต +ท +น +ป +ย +ร +ว +ส +ห +อ +ฮ +ั +า +ี +ึ +โ +ใ +ไ +่ +้ +์ +ḍ +Ḥ +ḥ +ṁ +ṃ +ṅ +ṇ +Ṛ +ṛ +Ṣ +ṣ +Ṭ +ṭ +ạ +ả +Ấ +ấ +ầ +ậ +ắ +ằ +ẻ +ẽ +ế +ề +ể +ễ +ệ +ị +ọ +ỏ +ố +ồ +ộ +ớ +ờ +ở +ụ +ủ +ứ +ữ +ἀ +ἁ +Ἀ +ἐ +ἔ +ἰ +ἱ +ὀ +ὁ +ὐ +ὲ +ὸ +ᾶ +᾽ +ῆ +ῇ +ῶ +‎ +‑ +‒ +– +— +― +‖ +† +‡ +• +… +‧ +‬ +′ +″ +⁄ +⁡ +⁰ +⁴ +⁵ +⁶ +⁷ +⁸ +⁹ +₁ +₂ +₃ +€ +₱ +₹ +₽ +℃ +ℏ +ℓ +№ +ℝ +™ +⅓ +⅔ +⅛ +→ +∂ +∈ +∑ +− +∗ +√ +∞ +∫ +≈ +≠ +≡ +≤ +≥ +⋅ +⋯ +█ +♪ +⟨ +⟩ +、 +。 +《 +》 +「 +」 +【 +】 +あ +う +え +お +か +が +き +ぎ +く +ぐ +け +げ +こ +ご +さ +し +じ +す +ず +せ +ぜ +そ +ぞ +た +だ +ち +っ +つ +で +と +ど +な +に +ね +の +は +ば +ひ +ぶ +へ +べ +ま +み +む +め +も +ゃ +や +ゆ +ょ +よ +ら +り +る +れ +ろ +わ +を +ん +ァ +ア +ィ +イ +ウ +ェ +エ +オ +カ +ガ +キ +ク +ケ +ゲ +コ +ゴ +サ +ザ +シ +ジ +ス +ズ +セ +ゾ +タ +ダ +チ +ッ +ツ +テ +デ +ト +ド +ナ +ニ +ネ +ノ +バ +パ +ビ +ピ +フ +プ +ヘ +ベ +ペ +ホ +ボ +ポ +マ +ミ +ム +メ +モ +ャ +ヤ +ュ +ユ +ョ +ヨ +ラ +リ +ル +レ +ロ +ワ +ン +・ +ー +ㄋ +ㄍ +ㄎ +ㄏ +ㄓ +ㄕ +ㄚ +ㄜ +ㄟ +ㄤ +ㄥ +ㄧ +ㄱ +ㄴ +ㄷ +ㄹ +ㅁ +ㅂ +ㅅ +ㅈ +ㅍ +ㅎ +ㅏ +ㅓ +ㅗ +ㅜ +ㅡ +ㅣ +㗎 +가 +각 +간 +갈 +감 +갑 +갓 +갔 +강 +같 +개 +거 +건 +걸 +겁 +것 +겉 +게 +겠 +겨 +결 +겼 +경 +계 +고 +곤 +골 +곱 +공 +과 +관 +광 +교 +구 +국 +굴 +귀 +귄 +그 +근 +글 +금 +기 +긴 +길 +까 +깍 +깔 +깜 +깨 +께 +꼬 +꼭 +꽃 +꾸 +꿔 +끔 +끗 +끝 +끼 +나 +난 +날 +남 +납 +내 +냐 +냥 +너 +넘 +넣 +네 +녁 +년 +녕 +노 +녹 +놀 +누 +눈 +느 +는 +늘 +니 +님 +닙 +다 +닥 +단 +달 +닭 +당 +대 +더 +덕 +던 +덥 +데 +도 +독 +동 +돼 +됐 +되 +된 +될 +두 +둑 +둥 +드 +들 +등 
+디 +따 +딱 +딸 +땅 +때 +떤 +떨 +떻 +또 +똑 +뚱 +뛰 +뜻 +띠 +라 +락 +란 +람 +랍 +랑 +래 +랜 +러 +런 +럼 +렇 +레 +려 +력 +렵 +렸 +로 +록 +롬 +루 +르 +른 +를 +름 +릉 +리 +릴 +림 +마 +막 +만 +많 +말 +맑 +맙 +맛 +매 +머 +먹 +멍 +메 +면 +명 +몇 +모 +목 +몸 +못 +무 +문 +물 +뭐 +뭘 +미 +민 +밌 +밑 +바 +박 +밖 +반 +받 +발 +밤 +밥 +방 +배 +백 +밸 +뱀 +버 +번 +벌 +벚 +베 +벼 +벽 +별 +병 +보 +복 +본 +볼 +봐 +봤 +부 +분 +불 +비 +빔 +빛 +빠 +빨 +뼈 +뽀 +뿅 +쁘 +사 +산 +살 +삼 +샀 +상 +새 +색 +생 +서 +선 +설 +섭 +섰 +성 +세 +셔 +션 +셨 +소 +속 +손 +송 +수 +숙 +순 +술 +숫 +숭 +숲 +쉬 +쉽 +스 +슨 +습 +슷 +시 +식 +신 +실 +싫 +심 +십 +싶 +싸 +써 +쓰 +쓴 +씌 +씨 +씩 +씬 +아 +악 +안 +않 +알 +야 +약 +얀 +양 +얘 +어 +언 +얼 +엄 +업 +없 +었 +엉 +에 +여 +역 +연 +염 +엽 +영 +옆 +예 +옛 +오 +온 +올 +옷 +옹 +와 +왔 +왜 +요 +욕 +용 +우 +운 +울 +웃 +워 +원 +월 +웠 +위 +윙 +유 +육 +윤 +으 +은 +을 +음 +응 +의 +이 +익 +인 +일 +읽 +임 +입 +있 +자 +작 +잔 +잖 +잘 +잡 +잤 +장 +재 +저 +전 +점 +정 +제 +져 +졌 +조 +족 +좀 +종 +좋 +죠 +주 +준 +줄 +중 +줘 +즈 +즐 +즘 +지 +진 +집 +짜 +짝 +쩌 +쪼 +쪽 +쫌 +쭈 +쯔 +찌 +찍 +차 +착 +찾 +책 +처 +천 +철 +체 +쳐 +쳤 +초 +촌 +추 +출 +춤 +춥 +춰 +치 +친 +칠 +침 +칩 +칼 +커 +켓 +코 +콩 +쿠 +퀴 +크 +큰 +큽 +키 +킨 +타 +태 +터 +턴 +털 +테 +토 +통 +투 +트 +특 +튼 +틀 +티 +팀 +파 +팔 +패 +페 +펜 +펭 +평 +포 +폭 +표 +품 +풍 +프 +플 +피 +필 +하 +학 +한 +할 +함 +합 +항 +해 +햇 +했 +행 +허 +험 +형 +혜 +호 +혼 +홀 +화 +회 +획 +후 +휴 +흐 +흔 +희 +히 +힘 +ﷺ +ﷻ +! +, +? +� +𠮶 diff --git a/extend_embedding_pretrained.py b/extend_embedding_pretrained.py new file mode 100644 index 0000000000000000000000000000000000000000..ce9389952f731a36733e62f6e3e3d471600885d7 --- /dev/null +++ b/extend_embedding_pretrained.py @@ -0,0 +1,95 @@ +""" +Mô-đun mở rộng embedding của mô hình bằng cách thêm token mới vào vocab. +Áp dụng khi fine-tuning mô hình F5-TTS. +""" + +import os +import random +import torch +from cached_path import cached_path +from safetensors.torch import load_file + +# Định nghĩa seed để đảm bảo tái lập kết quả +SEED = 666 + +def set_random_seed(seed: int): + """ Đặt seed cho các thư viện ngẫu nhiên để đảm bảo reproducibility. """ + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def load_vocab(file_path: str) -> list: + """ Đọc danh sách token từ file vocab. """ + if not os.path.exists(file_path): + raise FileNotFoundError(f"Không tìm thấy file: {file_path}") + + with open(file_path, "r", encoding="utf8") as file: + return [line.strip() for line in file.readlines()] + + +def expand_model_embeddings(ckpt_path: str, new_ckpt_path: str, num_new_tokens: int = 42): + """ + Mở rộng embedding của mô hình bằng cách thêm token mới. + + Args: + ckpt_path (str): Đường dẫn đến file checkpoint gốc. + new_ckpt_path (str): Đường dẫn để lưu checkpoint đã mở rộng. + num_new_tokens (int): Số lượng token mới cần thêm vào. + """ + if ckpt_path.endswith(".safetensors"): + ckpt = load_file(ckpt_path, device="cpu") + ckpt = {"ema_model_state_dict": ckpt} + elif ckpt_path.endswith(".pt"): + ckpt = torch.load(ckpt_path, map_location="cpu") + else: + raise ValueError("Định dạng checkpoint không được hỗ trợ. Chỉ hỗ trợ .safetensors hoặc .pt") + + ema_sd = ckpt.get("ema_model_state_dict", {}) + embed_key_ema = "ema_model.transformer.text_embed.text_embed.weight" + + if embed_key_ema not in ema_sd: + raise KeyError(f"Không tìm thấy khóa {embed_key_ema} trong checkpoint.") + + old_embed_ema = ema_sd[embed_key_ema] + vocab_old, embed_dim = old_embed_ema.shape + vocab_new = vocab_old + num_new_tokens + + def expand_embeddings(old_embeddings: torch.Tensor) -> torch.Tensor: + """ Mở rộng embeddings bằng cách thêm vector mới. 
""" + new_embeddings = torch.zeros((vocab_new, embed_dim)) + new_embeddings[:vocab_old] = old_embeddings + new_embeddings[vocab_old:] = torch.randn((num_new_tokens, embed_dim)) + return new_embeddings + + ema_sd[embed_key_ema] = expand_embeddings(ema_sd[embed_key_ema]) + torch.save(ckpt, new_ckpt_path) + + +if __name__ == "__main__": + # Thiết lập seed ngẫu nhiên + set_random_seed(SEED) + + # Đường dẫn file vocab + TOKEN_PRETRAINED_PATH = "data/Emilia_ZH_EN_pinyin/vocab.txt" + TOKEN_NEW_PATH = "data/your_training_dataset/vocab.txt" + + # Load vocab + tokens_pretrained = load_vocab(TOKEN_PRETRAINED_PATH) + tokens_new = load_vocab(TOKEN_NEW_PATH) + + # Số lượng token mới cần thêm + vocab_size_new = len(tokens_new) - len(tokens_pretrained) + + # Đường dẫn checkpoint + ckpt_path = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.pt")) + new_ckpt_path = "ckpts/your_training_dataset/pretrained_model_1200000.pt" + + # Mở rộng embedding + expand_model_embeddings(ckpt_path, new_ckpt_path, num_new_tokens=vocab_size_new) + + print(f"Checkpoint đã được mở rộng và lưu tại: {new_ckpt_path}") \ No newline at end of file diff --git a/fine_tuning.sh b/fine_tuning.sh new file mode 100644 index 0000000000000000000000000000000000000000..4eaf898c9c052432446e20f2ee90b90c3d22397a --- /dev/null +++ b/fine_tuning.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +# Thiết lập GPU sử dụng +export CUDA_VISIBLE_DEVICES=0 # 0 nếu như bạn có GPU của nvidia :v + +log() { + echo "$@" +} + +# Tạo thư mục cần thiết, +DATASET_DIR="data/your_training_dataset" +mkdir -p "$DATASET_DIR" +# Bắt buộc phải có thư mục data/your_dataset chứa các file .wav, file .txt tương ứng, các bạn tự xử lý +mkdir -p data/your_dataset + +# Định nghĩa các tham số huấn luyện +EXP_NAME="F5TTS_Base" +DATASET_NAME="your_training_dataset" +BATCH_SIZE=7000 +NUM_WOKERS=16 +WARMUP_UPDATES=20000 +SAVE_UPDATES=10000 +LAST_UPDATES=10000 +PRETRAIN_CKPT="/mnt/d/ckpts/your_training_dataset/pretrained_model_1200000.pt" + +# Tạo các biến stage để quản lý pipeline, bước nào đã chạy rồi thì không cần chạy lại +stage=5 +stop_stage=5 + +# Chuẩn hoá sample_rate, bỏ qua stage này nếu audio của bạn đã ở định dạng 24Khz +if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then + log "Convert sample rate: data/your_dataset ..." + python convert_sr.py +fi + +# Chuẩn bị dữ liệu audio_name và text tương ứng +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + log "Preparing metadata at: data/your_dataset ..." + python prepare_metadata.py +fi + +# Bổ sung từ vựng trong bộ dữ liệu của bạn chưa có trong từ vựng của mô hình pretrained +if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then + log "Checking missing token in pretrained vocab ... " + python check_vocab_pretrained.py +fi + +# Mở rộng embedding của mô hình pretrained để hỗ trợ bộ từ vựng mới +if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then + log "Extend embedding pretrained with new vocab ... " + python extend_embedding_pretrained.py +fi + +# Trích xuất đặc trưng +if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then + log "Feature extraction ... " + python src/f5_tts/train/datasets/prepare_csv_wavs.py "$DATASET_DIR" "$DATASET_DIR" --workers "$NUM_WOKERS" +fi + +# Chạy quá trình fine-tuning +if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then + log "Start fine-tuning F5-TTS with your dataset ... 
" + python src/f5_tts/train/finetune_cli.py \ + --exp_name "$EXP_NAME" \ + --dataset_name "$DATASET_NAME" \ + --batch_size_per_gpu "$BATCH_SIZE" \ + --num_warmup_updates "$WARMUP_UPDATES" \ + --save_per_updates "$SAVE_UPDATES" \ + --last_per_updates "$LAST_UPDATES" \ + --finetune \ + --log_samples \ + --pretrain "$PRETRAIN_CKPT" + ### Nếu bạn muốn training với nhiều gpu, sử dụng câu lệnh bên dưới: + # accelerate launch src/f5_tts/train/finetune_cli.py \ + # --exp_name "$EXP_NAME" \ + # --dataset_name "$DATASET_NAME" \ + # --batch_size_per_gpu "$BATCH_SIZE" \ + # --num_warmup_updates "$WARMUP_UPDATES" \ + # --save_per_updates "$SAVE_UPDATES" \ + # --last_per_updates "$LAST_UPDATES" \ + # --finetune \ + # --log_samples \ + # --pretrain "$PRETRAIN_CKPT" +fi + +log "Fine-tuning F5-TTS done." diff --git a/infer.sh b/infer.sh new file mode 100644 index 0000000000000000000000000000000000000000..5bca675d8db72102439159befe0eb54a01ecea3d --- /dev/null +++ b/infer.sh @@ -0,0 +1,9 @@ +f5-tts_infer-cli \ +--model "F5TTS_Base" \ +--ref_audio ref.wav \ +--ref_text "cả hai bên hãy cố gắng hiểu cho nhau" \ +--gen_text "mình muốn ra nước ngoài để tiếp xúc nhiều công ty lớn, sau đó mang những gì học được về việt nam giúp xây dựng các công trình tốt hơn" \ +--speed 1.0 \ +--vocoder_name vocos \ +--vocab_file data/your_training_dataset/vocab.txt \ +--ckpt_file ckpts/your_training_dataset/model_last.pt \ \ No newline at end of file diff --git a/prepare_metadata.py b/prepare_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..32c6e0a8ccbdb2d7ce3e4055870ad62e910e433f --- /dev/null +++ b/prepare_metadata.py @@ -0,0 +1,81 @@ +""" +Mô-đun chuẩn bị metadata cho tập dữ liệu huấn luyện. +Tạo file metadata.csv và vocab từ tập dữ liệu âm thanh. +""" + +import os +import glob +import shutil +import soundfile as sf +from tqdm import tqdm + +# Đường dẫn dữ liệu +DATASET_DIR = "data/your_dataset" +TRAINING_DIR = "data/your_training_dataset" +WAVS_DIR = os.path.join(TRAINING_DIR, "wavs") +METADATA_PATH = os.path.join(TRAINING_DIR, "metadata.csv") +VOCAB_PATH = os.path.join(TRAINING_DIR, "vocab_your_dataset.txt") + +# Tạo thư mục đích nếu chưa tồn tại +os.makedirs(WAVS_DIR, exist_ok=True) + +def get_audio_duration(wav_path: str) -> float: + """ + Tính thời lượng của file audio. + + Args: + wav_path (str): Đường dẫn file WAV. + + Returns: + float: Thời lượng của file (giây). + """ + audio_data, sr = sf.read(wav_path) + return len(audio_data) / sr + + +def process_dataset(): + """ + Duyệt qua tất cả file WAV, copy vào thư mục mới, tạo metadata và vocab. 
+    """
+    wav_paths = glob.glob(os.path.join(DATASET_DIR, "*.wav"))
+    tokens = set()
+
+    with open(METADATA_PATH, "w", encoding="utf8") as fw:
+        for wav_path in tqdm(wav_paths, desc="Processing dataset"):
+            wav_name = os.path.basename(wav_path)
+            wav_dest_path = os.path.join(WAVS_DIR, wav_name)
+
+            # Copy the audio file into the new directory
+            shutil.copy(wav_path, wav_dest_path)
+
+            # Read the transcript
+            txt_path = wav_path.replace(".wav", ".txt")
+            if not os.path.exists(txt_path):
+                continue
+
+            with open(txt_path, "r", encoding="utf8") as fr:
+                text = fr.readline().strip().lower()
+                text = text.replace("_", " ")
+                text = " ".join(text.split())
+
+            # Skip files that do not meet the requirements
+            duration = get_audio_duration(wav_path)
+            if duration < 1 or duration > 30 or len(text.split()) < 3:
+                continue
+
+            # Write to metadata.csv
+            fw.write(f"wavs/{wav_name}|{text}\n")
+
+            # Collect tokens for the vocab
+            tokens.update(text)
+
+    # Write the vocab to file
+    with open(VOCAB_PATH, "w", encoding="utf8") as fw_vocab:
+        fw_vocab.write("\n".join(sorted(tokens)))
+
+    print(f"Metadata saved to: {METADATA_PATH}")
+    print(f"Vocab saved to: {VOCAB_PATH}")
+
+
+if __name__ == "__main__":
+    process_dataset()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..f76806a9f7d001e7dda3e8e95cef243493ce4f2e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,62 @@
+[build-system]
+requires = ["setuptools >= 61.0", "setuptools-scm>=8.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "f5-tts"
+version = "1.0.1"
+description = "F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching"
+readme = "README.md"
+license = {text = "MIT License"}
+classifiers = [
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3",
+]
+dependencies = [
+    "accelerate>=0.33.0",
+    "bitsandbytes>0.37.0; platform_machine != 'arm64' and platform_system != 'Darwin'",
+    "cached_path",
+    "click",
+    "datasets",
+    "ema_pytorch>=0.5.2",
+    "gradio>=3.45.2",
+    "hydra-core>=1.3.0",
+    "jieba",
+    "librosa",
+    "matplotlib",
+    "numpy<=1.26.4",
+    "pydub",
+    "pypinyin",
+    "safetensors",
+    "soundfile",
+    "tomli",
+    "torch>=2.0.0",
+    "torchaudio>=2.0.0",
+    "torchdiffeq",
+    "tqdm>=4.65.0",
+    "transformers",
+    "transformers_stream_generator",
+    "vocos",
+    "wandb",
+    "x_transformers>=1.31.14",
+]
+
+[project.optional-dependencies]
+eval = [
+    "faster_whisper==0.10.1",
+    "funasr",
+    "jiwer",
+    "modelscope",
+    "zhconv",
+    "zhon",
+]
+
+[project.urls]
+Homepage = "https://github.com/SWivid/F5-TTS"
+
+[project.scripts]
+"f5-tts_infer-cli" = "f5_tts.infer.infer_cli:main"
+"f5-tts_infer-gradio" = "f5_tts.infer.infer_gradio:main"
+"f5-tts_finetune-cli" = "f5_tts.train.finetune_cli:main"
+"f5-tts_finetune-gradio" = "f5_tts.train.finetune_gradio:main"
diff --git a/src/f5_tts.egg-info/PKG-INFO b/src/f5_tts.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..3a3ecf5c6b8cf0fbb0f66dfd9603b29b19c3574c
--- /dev/null
+++ b/src/f5_tts.egg-info/PKG-INFO
@@ -0,0 +1,156 @@
+Metadata-Version: 2.4
+Name: f5-tts
+Version: 1.0.1
+Summary: F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching
+License: MIT License
+Project-URL: Homepage, https://github.com/SWivid/F5-TTS
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: accelerate>=0.33.0
+Requires-Dist: bitsandbytes>0.37.0; platform_machine != "arm64" and platform_system != "Darwin"
+Requires-Dist: cached_path
+Requires-Dist: click
+Requires-Dist: datasets
+Requires-Dist: ema_pytorch>=0.5.2
+Requires-Dist: gradio>=3.45.2
+Requires-Dist: hydra-core>=1.3.0
+Requires-Dist: jieba
+Requires-Dist: librosa
+Requires-Dist: matplotlib
+Requires-Dist: numpy<=1.26.4
+Requires-Dist: pydub
+Requires-Dist: pypinyin
+Requires-Dist: safetensors
+Requires-Dist: soundfile
+Requires-Dist: tomli
+Requires-Dist: torch>=2.0.0
+Requires-Dist: torchaudio>=2.0.0
+Requires-Dist: torchdiffeq
+Requires-Dist: tqdm>=4.65.0
+Requires-Dist: transformers
+Requires-Dist: transformers_stream_generator
+Requires-Dist: vocos
+Requires-Dist: wandb
+Requires-Dist: x_transformers>=1.31.14
+Provides-Extra: eval
+Requires-Dist: faster_whisper==0.10.1; extra == "eval"
+Requires-Dist: funasr; extra == "eval"
+Requires-Dist: jiwer; extra == "eval"
+Requires-Dist: modelscope; extra == "eval"
+Requires-Dist: zhconv; extra == "eval"
+Requires-Dist: zhon; extra == "eval"
+Dynamic: license-file
+
+# F5-TTS-Vietnamese
+![F5-TTS Architecture](tests/f5-tts.png)
+
+A fine-tuning pipeline for training a Vietnamese speech synthesis model using the F5-TTS architecture.
+
+Try the demo at: https://huggingface.co/spaces/hynt/F5-TTS-Vietnamese-100h
+
+## Tips for training
+- 100 hours of data is generally sufficient to train a Vietnamese text-to-speech model for specific voices. However, to achieve optimal performance in voice cloning across a wide range of speakers, a larger dataset is recommended. I fine-tuned an F5-TTS model on approximately 1000 hours of data, which resulted in excellent voice cloning performance.
+- Having a large number of speaker hours with highly accurate transcriptions is crucial — the more, the better. This helps the model generalize better to unseen speakers, resulting in lower WER after training and reducing hallucinations.
+
+## Tips for inference
+- It is recommended to select reference audio that is clear, has minimal interruptions, and is less than 10 seconds long, as this will improve the synthesis results.
+- If the reference audio text is not provided, it is transcribed with whisper-large-v3-turbo by default. Consequently, Vietnamese may not be accurately recognized in some cases, which can result in poor speech synthesis quality.
+- If you want to synthesize speech from a long text paragraph, it is recommended to replace the chunking function (located in **src/f5_tts/infer/utils_infer.py**) with the modified chunk_text function below:

+```python
+def chunk_text(text, max_chars=135):
+    sentences = [s.strip() for s in text.split('. ') if s.strip()]
+    i = 0
+    while i < len(sentences):
+        if len(sentences[i].split()) < 4:
+            if i == 0:
+                # Merge with the next sentence
+                sentences[i + 1] = sentences[i] + ', ' + sentences[i + 1]
+                del sentences[i]
+            else:
+                # Merge with the previous sentence
+                sentences[i - 1] = sentences[i - 1] + ', ' + sentences[i]
+                del sentences[i]
+                i -= 1
+        else:
+            i += 1
+
+    final_sentences = []
+    for sentence in sentences:
+        parts = [p.strip() for p in sentence.split(', ')]
+        buffer = []
+        for part in parts:
+            buffer.append(part)
+            total_words = sum(len(p.split()) for p in buffer)
+            if total_words > 20:
+                # Split into separate chunks
+                long_part = ', '.join(buffer)
+                final_sentences.append(long_part)
+                buffer = []
+        if buffer:
+            final_sentences.append(', '.join(buffer))
+
+    if len(final_sentences[-1].split()) < 4 and len(final_sentences) >= 2:
+        final_sentences[-2] = final_sentences[-2] + ", " + final_sentences[-1]
+        final_sentences = final_sentences[0:-1]
+
+    return final_sentences
+```
+
+## Installation
+
+### Create a separate environment if needed
+
+```bash
+# Create a python 3.10 conda env (you could also use virtualenv)
+conda create -n f5-tts python=3.10
+conda activate f5-tts
+```
+
+### Install PyTorch
+
+> ```bash
+> # Install pytorch with your CUDA version, e.g.
+> pip install torch==2.4.0+cu124 torchaudio==2.4.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124
+> ```
+
+### Install f5-tts module:
+
+> ```bash
+> cd F5-TTS-Vietnamese
+> pip install -e .
+> ```
+
+### Install sox, ffmpeg
+
+> ```bash
+> sudo apt-get update
+> sudo apt-get install sox ffmpeg
+> ```
+
+## Fine-tuning pipeline
+
+Steps:
+
+- Prepare `audio_name` and corresponding transcriptions
+- Add missing vocabulary from your dataset to the pretrained model
+- Expand the model's embedding to support the updated vocabulary
+- Perform feature extraction
+- Fine-tune the model
+
+```bash
+bash fine_tuning.sh
+```
+
+### Inference
+
+```bash
+bash infer.sh
+```
+
+### References
+
+- Original F5-TTS repository: [https://github.com/SWivid/F5-TTS](https://github.com/SWivid/F5-TTS)
diff --git a/src/f5_tts.egg-info/SOURCES.txt b/src/f5_tts.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7311c7c5335f9977f269802aa205340c6846d0b3
--- /dev/null
+++ b/src/f5_tts.egg-info/SOURCES.txt
@@ -0,0 +1,80 @@
+.gitignore
+.gitmodules
+Dockerfile
+LICENSE
+README.md
+check_vocab_pretrained.py
+convert_sr.py
+extend_embedding_pretrained.py
+fine_tuning.sh
+infer.sh
+prepare_metadata.py
+pyproject.toml
+data/Emilia_ZH_EN_pinyin/vocab.txt
+src/f5_tts/api.py
+src/f5_tts/socket_client.py
+src/f5_tts/socket_server.py
+src/f5_tts.egg-info/PKG-INFO
+src/f5_tts.egg-info/SOURCES.txt
+src/f5_tts.egg-info/dependency_links.txt
+src/f5_tts.egg-info/entry_points.txt
+src/f5_tts.egg-info/requires.txt
+src/f5_tts.egg-info/top_level.txt
+src/f5_tts/configs/E2TTS_Base.yaml
+src/f5_tts/configs/E2TTS_Small.yaml
+src/f5_tts/configs/F5TTS_Base.yaml
+src/f5_tts/configs/F5TTS_Small.yaml
+src/f5_tts/configs/F5TTS_v1_Base.yaml
+src/f5_tts/eval/README.md
+src/f5_tts/eval/ecapa_tdnn.py
+src/f5_tts/eval/eval_infer_batch.py
+src/f5_tts/eval/eval_infer_batch.sh
+src/f5_tts/eval/eval_librispeech_test_clean.py
+src/f5_tts/eval/eval_seedtts_testset.py
+src/f5_tts/eval/eval_utmos.py
+src/f5_tts/eval/utils_eval.py
+src/f5_tts/infer/README.md
+src/f5_tts/infer/SHARED.md
+src/f5_tts/infer/infer_cli.py
+src/f5_tts/infer/infer_gradio.py
+src/f5_tts/infer/speech_edit.py
+src/f5_tts/infer/utils_infer.py
+src/f5_tts/infer/examples/vocab.txt +src/f5_tts/infer/examples/basic/basic.toml +src/f5_tts/infer/examples/basic/basic_ref_en.wav +src/f5_tts/infer/examples/basic/basic_ref_zh.wav +src/f5_tts/infer/examples/multi/country.flac +src/f5_tts/infer/examples/multi/main.flac +src/f5_tts/infer/examples/multi/story.toml +src/f5_tts/infer/examples/multi/story.txt +src/f5_tts/infer/examples/multi/town.flac +src/f5_tts/model/__init__.py +src/f5_tts/model/cfm.py +src/f5_tts/model/dataset.py +src/f5_tts/model/modules.py +src/f5_tts/model/trainer.py +src/f5_tts/model/utils.py +src/f5_tts/model/__pycache__/__init__.cpython-310.pyc +src/f5_tts/model/__pycache__/cfm.cpython-310.pyc +src/f5_tts/model/__pycache__/dataset.cpython-310.pyc +src/f5_tts/model/__pycache__/modules.cpython-310.pyc +src/f5_tts/model/__pycache__/trainer.cpython-310.pyc +src/f5_tts/model/__pycache__/utils.cpython-310.pyc +src/f5_tts/model/backbones/README.md +src/f5_tts/model/backbones/dit.py +src/f5_tts/model/backbones/mmdit.py +src/f5_tts/model/backbones/unett.py +src/f5_tts/model/backbones/__pycache__/dit.cpython-310.pyc +src/f5_tts/model/backbones/__pycache__/mmdit.cpython-310.pyc +src/f5_tts/model/backbones/__pycache__/unett.cpython-310.pyc +src/f5_tts/scripts/count_max_epoch.py +src/f5_tts/scripts/count_params_gflops.py +src/f5_tts/train/README.md +src/f5_tts/train/finetune_cli.py +src/f5_tts/train/finetune_gradio.py +src/f5_tts/train/train.py +src/f5_tts/train/datasets/prepare_csv_wavs.py +src/f5_tts/train/datasets/prepare_emilia.py +src/f5_tts/train/datasets/prepare_libritts.py +src/f5_tts/train/datasets/prepare_ljspeech.py +src/f5_tts/train/datasets/prepare_wenetspeech4tts.py \ No newline at end of file diff --git a/src/f5_tts.egg-info/dependency_links.txt b/src/f5_tts.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/src/f5_tts.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/src/f5_tts.egg-info/entry_points.txt b/src/f5_tts.egg-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae1d4a47feee181ad63d2d137d0d9f620a8611b6 --- /dev/null +++ b/src/f5_tts.egg-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +f5-tts_finetune-cli = f5_tts.train.finetune_cli:main +f5-tts_finetune-gradio = f5_tts.train.finetune_gradio:main +f5-tts_infer-cli = f5_tts.infer.infer_cli:main +f5-tts_infer-gradio = f5_tts.infer.infer_gradio:main diff --git a/src/f5_tts.egg-info/requires.txt b/src/f5_tts.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..c822ec2732d8ce82b66f96fbbdea97d32aafcf1f --- /dev/null +++ b/src/f5_tts.egg-info/requires.txt @@ -0,0 +1,36 @@ +accelerate>=0.33.0 +cached_path +click +datasets +ema_pytorch>=0.5.2 +gradio>=3.45.2 +hydra-core>=1.3.0 +jieba +librosa +matplotlib +numpy<=1.26.4 +pydub +pypinyin +safetensors +soundfile +tomli +torch>=2.0.0 +torchaudio>=2.0.0 +torchdiffeq +tqdm>=4.65.0 +transformers +transformers_stream_generator +vocos +wandb +x_transformers>=1.31.14 + +[:platform_machine != "arm64" and platform_system != "Darwin"] +bitsandbytes>0.37.0 + +[eval] +faster_whisper==0.10.1 +funasr +jiwer +modelscope +zhconv +zhon diff --git a/src/f5_tts.egg-info/top_level.txt b/src/f5_tts.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..75fc20604cc5891459c9b7e45fee89620e164ffa --- /dev/null +++ b/src/f5_tts.egg-info/top_level.txt @@ -0,0 +1 @@ +f5_tts diff --git 
a/src/f5_tts/__pycache__/api.cpython-310.pyc b/src/f5_tts/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d806ac54c203752ab73dad3daf8976219b1d0d7a Binary files /dev/null and b/src/f5_tts/__pycache__/api.cpython-310.pyc differ diff --git a/src/f5_tts/api.py b/src/f5_tts/api.py new file mode 100644 index 0000000000000000000000000000000000000000..7c73c87dae0f79ddb53acd5101c21b525d119899 --- /dev/null +++ b/src/f5_tts/api.py @@ -0,0 +1,165 @@ +import random +import sys +from importlib.resources import files + +import soundfile as sf +import tqdm +from cached_path import cached_path +from omegaconf import OmegaConf + +from f5_tts.infer.utils_infer import ( + load_model, + load_vocoder, + transcribe, + preprocess_ref_audio_text, + infer_process, + remove_silence_for_generated_wav, + save_spectrogram, +) +from f5_tts.model import DiT, UNetT # noqa: F401. used for config +from f5_tts.model.utils import seed_everything + + +class F5TTS: + def __init__( + self, + model="F5TTS_v1_Base", + ckpt_file="", + vocab_file="", + ode_method="euler", + use_ema=True, + vocoder_local_path=None, + device=None, + hf_cache_dir=None, + ): + model_cfg = OmegaConf.load(str(files("f5_tts").joinpath(f"configs/{model}.yaml"))) + model_cls = globals()[model_cfg.model.backbone] + model_arc = model_cfg.model.arch + + self.mel_spec_type = model_cfg.model.mel_spec.mel_spec_type + self.target_sample_rate = model_cfg.model.mel_spec.target_sample_rate + + self.ode_method = ode_method + self.use_ema = use_ema + + if device is not None: + self.device = device + else: + import torch + + self.device = ( + "cuda" + if torch.cuda.is_available() + else "xpu" + if torch.xpu.is_available() + else "mps" + if torch.backends.mps.is_available() + else "cpu" + ) + + # Load models + self.vocoder = load_vocoder( + self.mel_spec_type, vocoder_local_path is not None, vocoder_local_path, self.device, hf_cache_dir + ) + + repo_name, ckpt_step, ckpt_type = "F5-TTS", 1250000, "safetensors" + + # override for previous models + if model == "F5TTS_Base": + if self.mel_spec_type == "vocos": + ckpt_step = 1200000 + elif self.mel_spec_type == "bigvgan": + model = "F5TTS_Base_bigvgan" + ckpt_type = "pt" + elif model == "E2TTS_Base": + repo_name = "E2-TTS" + ckpt_step = 1200000 + else: + raise ValueError(f"Unknown model type: {model}") + + if not ckpt_file: + ckpt_file = str( + cached_path(f"hf://SWivid/{repo_name}/{model}/model_{ckpt_step}.{ckpt_type}", cache_dir=hf_cache_dir) + ) + self.ema_model = load_model( + model_cls, model_arc, ckpt_file, self.mel_spec_type, vocab_file, self.ode_method, self.use_ema, self.device + ) + + def transcribe(self, ref_audio, language=None): + return transcribe(ref_audio, language) + + def export_wav(self, wav, file_wave, remove_silence=False): + sf.write(file_wave, wav, self.target_sample_rate) + + if remove_silence: + remove_silence_for_generated_wav(file_wave) + + def export_spectrogram(self, spec, file_spec): + save_spectrogram(spec, file_spec) + + def infer( + self, + ref_file, + ref_text, + gen_text, + show_info=print, + progress=tqdm, + target_rms=0.1, + cross_fade_duration=0.15, + sway_sampling_coef=-1, + cfg_strength=2, + nfe_step=32, + speed=1.0, + fix_duration=None, + remove_silence=False, + file_wave=None, + file_spec=None, + seed=None, + ): + if seed is None: + self.seed = random.randint(0, sys.maxsize) + seed_everything(self.seed) + + ref_file, ref_text = preprocess_ref_audio_text(ref_file, ref_text, device=self.device) + + wav, sr, spec = infer_process( + 
ref_file, + ref_text, + gen_text, + self.ema_model, + self.vocoder, + self.mel_spec_type, + show_info=show_info, + progress=progress, + target_rms=target_rms, + cross_fade_duration=cross_fade_duration, + nfe_step=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + speed=speed, + fix_duration=fix_duration, + device=self.device, + ) + + if file_wave is not None: + self.export_wav(wav, file_wave, remove_silence) + + if file_spec is not None: + self.export_spectrogram(spec, file_spec) + + return wav, sr, spec + + +if __name__ == "__main__": + f5tts = F5TTS() + + wav, sr, spec = f5tts.infer( + ref_file=str(files("f5_tts").joinpath("infer/examples/basic/basic_ref_en.wav")), + ref_text="some call me nature, others call me mother nature.", + gen_text="""I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring. Respect me and I'll nurture you; ignore me and you shall face the consequences.""", + file_wave=str(files("f5_tts").joinpath("../../tests/api_out.wav")), + file_spec=str(files("f5_tts").joinpath("../../tests/api_out.png")), + seed=None, + ) + + print("seed :", f5tts.seed) diff --git a/src/f5_tts/configs/E2TTS_Base.yaml b/src/f5_tts/configs/E2TTS_Base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee701829414864454d42be86260a33722eccdf38 --- /dev/null +++ b/src/f5_tts/configs/E2TTS_Base.yaml @@ -0,0 +1,49 @@ +hydra: + run: + dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +datasets: + name: Emilia_ZH_EN # dataset name + batch_size_per_gpu: 38400 # 8 GPUs, 8 * 38400 = 307200 + batch_size_type: frame # frame | sample + max_samples: 64 # max sequences per batch if use frame-wise batch_size. we set 32 for small models, 64 for base models + num_workers: 16 + +optim: + epochs: 11 + learning_rate: 7.5e-5 + num_warmup_updates: 20000 # warmup updates + grad_accumulation_steps: 1 # note: updates = steps / grad_accumulation_steps + max_grad_norm: 1.0 # gradient clipping + bnb_optimizer: False # use bnb 8bit AdamW optimizer or not + +model: + name: E2TTS_Base + tokenizer: pinyin + tokenizer_path: null # if 'custom' tokenizer, define the path want to use (should be vocab.txt) + backbone: UNetT + arch: + dim: 1024 + depth: 24 + heads: 16 + ff_mult: 4 + text_mask_padding: False + pe_attn_head: 1 + mel_spec: + target_sample_rate: 24000 + n_mel_channels: 100 + hop_length: 256 + win_length: 1024 + n_fft: 1024 + mel_spec_type: vocos # vocos | bigvgan + vocoder: + is_local: False # use local offline ckpt or not + local_path: null # local vocoder path + +ckpts: + logger: wandb # wandb | tensorboard | null + log_samples: True # infer random sample per save checkpoint. 
wip, normal to fail with extra long samples + save_per_updates: 50000 # save checkpoint per updates + keep_last_n_checkpoints: -1 # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints + last_per_updates: 5000 # save last checkpoint per updates + save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name} \ No newline at end of file diff --git a/src/f5_tts/configs/E2TTS_Small.yaml b/src/f5_tts/configs/E2TTS_Small.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cbb1f44e281ca9fc937eb8097af1bc3618c88d77 --- /dev/null +++ b/src/f5_tts/configs/E2TTS_Small.yaml @@ -0,0 +1,49 @@ +hydra: + run: + dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +datasets: + name: Emilia_ZH_EN + batch_size_per_gpu: 38400 # 8 GPUs, 8 * 38400 = 307200 + batch_size_type: frame # frame | sample + max_samples: 64 # max sequences per batch if use frame-wise batch_size. we set 32 for small models, 64 for base models + num_workers: 16 + +optim: + epochs: 11 + learning_rate: 7.5e-5 + num_warmup_updates: 20000 # warmup updates + grad_accumulation_steps: 1 # note: updates = steps / grad_accumulation_steps + max_grad_norm: 1.0 + bnb_optimizer: False + +model: + name: E2TTS_Small + tokenizer: pinyin + tokenizer_path: null # if 'custom' tokenizer, define the path want to use (should be vocab.txt) + backbone: UNetT + arch: + dim: 768 + depth: 20 + heads: 12 + ff_mult: 4 + text_mask_padding: False + pe_attn_head: 1 + mel_spec: + target_sample_rate: 24000 + n_mel_channels: 100 + hop_length: 256 + win_length: 1024 + n_fft: 1024 + mel_spec_type: vocos # vocos | bigvgan + vocoder: + is_local: False # use local offline ckpt or not + local_path: null # local vocoder path + +ckpts: + logger: wandb # wandb | tensorboard | null + log_samples: True # infer random sample per save checkpoint. wip, normal to fail with extra long samples + save_per_updates: 50000 # save checkpoint per updates + keep_last_n_checkpoints: -1 # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints + last_per_updates: 5000 # save last checkpoint per updates + save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name} \ No newline at end of file diff --git a/src/f5_tts/configs/F5TTS_Base.yaml b/src/f5_tts/configs/F5TTS_Base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7043cb4a90206a741f509fa383bff575e903c3b9 --- /dev/null +++ b/src/f5_tts/configs/F5TTS_Base.yaml @@ -0,0 +1,52 @@ +hydra: + run: + dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +datasets: + name: your_training_dataset # dataset name + batch_size_per_gpu: 38400 # 8 GPUs, 8 * 38400 = 307200 + batch_size_type: frame # frame | sample + max_samples: 64 # max sequences per batch if use frame-wise batch_size. 
we set 32 for small models, 64 for base models + num_workers: 16 + +optim: + epochs: 11 + learning_rate: 7.5e-5 + num_warmup_updates: 20000 # warmup updates + grad_accumulation_steps: 1 # note: updates = steps / grad_accumulation_steps + max_grad_norm: 1.0 # gradient clipping + bnb_optimizer: False # use bnb 8bit AdamW optimizer or not + +model: + name: F5TTS_Base # model name + tokenizer: char # tokenizer type + tokenizer_path: null # if 'custom' tokenizer, define the path want to use (should be vocab.txt) + backbone: DiT + arch: + dim: 1024 + depth: 22 + heads: 16 + ff_mult: 2 + text_dim: 512 + text_mask_padding: False + conv_layers: 4 + pe_attn_head: 1 + checkpoint_activations: False # recompute activations and save memory for extra compute + mel_spec: + target_sample_rate: 24000 + n_mel_channels: 100 + hop_length: 256 + win_length: 1024 + n_fft: 1024 + mel_spec_type: vocos # vocos | bigvgan + vocoder: + is_local: False # use local offline ckpt or not + local_path: null # local vocoder path + +ckpts: + logger: tensorboard # wandb | tensorboard | null + log_samples: True # infer random sample per save checkpoint. wip, normal to fail with extra long samples + save_per_updates: 50000 # save checkpoint per updates + keep_last_n_checkpoints: -1 # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints + last_per_updates: 5000 # save last checkpoint per updates + save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name} \ No newline at end of file diff --git a/src/f5_tts/configs/F5TTS_Small.yaml b/src/f5_tts/configs/F5TTS_Small.yaml new file mode 100644 index 0000000000000000000000000000000000000000..faae390337d076b18e4a35c1af4ac48d92524952 --- /dev/null +++ b/src/f5_tts/configs/F5TTS_Small.yaml @@ -0,0 +1,52 @@ +hydra: + run: + dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +datasets: + name: Emilia_ZH_EN + batch_size_per_gpu: 38400 # 8 GPUs, 8 * 38400 = 307200 + batch_size_type: frame # frame | sample + max_samples: 64 # max sequences per batch if use frame-wise batch_size. we set 32 for small models, 64 for base models + num_workers: 16 + +optim: + epochs: 11 + learning_rate: 7.5e-5 + num_warmup_updates: 20000 # warmup updates + grad_accumulation_steps: 1 # note: updates = steps / grad_accumulation_steps + max_grad_norm: 1.0 # gradient clipping + bnb_optimizer: False # use bnb 8bit AdamW optimizer or not + +model: + name: F5TTS_Small + tokenizer: pinyin + tokenizer_path: null # if 'custom' tokenizer, define the path want to use (should be vocab.txt) + backbone: DiT + arch: + dim: 768 + depth: 18 + heads: 12 + ff_mult: 2 + text_dim: 512 + text_mask_padding: False + conv_layers: 4 + pe_attn_head: 1 + checkpoint_activations: False # recompute activations and save memory for extra compute + mel_spec: + target_sample_rate: 24000 + n_mel_channels: 100 + hop_length: 256 + win_length: 1024 + n_fft: 1024 + mel_spec_type: vocos # vocos | bigvgan + vocoder: + is_local: False # use local offline ckpt or not + local_path: null # local vocoder path + +ckpts: + logger: wandb # wandb | tensorboard | null + log_samples: True # infer random sample per save checkpoint. 
wip, normal to fail with extra long samples + save_per_updates: 50000 # save checkpoint per updates + keep_last_n_checkpoints: -1 # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints + last_per_updates: 5000 # save last checkpoint per updates + save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name} \ No newline at end of file diff --git a/src/f5_tts/configs/F5TTS_v1_Base.yaml b/src/f5_tts/configs/F5TTS_v1_Base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c7717facb114c0c1fc598e29a5c589485249d9d1 --- /dev/null +++ b/src/f5_tts/configs/F5TTS_v1_Base.yaml @@ -0,0 +1,53 @@ +hydra: + run: + dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +datasets: + name: Emilia_ZH_EN # dataset name + batch_size_per_gpu: 38400 # 8 GPUs, 8 * 38400 = 307200 + batch_size_type: frame # frame | sample + max_samples: 64 # max sequences per batch if use frame-wise batch_size. we set 32 for small models, 64 for base models + num_workers: 16 + +optim: + epochs: 11 + learning_rate: 7.5e-5 + num_warmup_updates: 20000 # warmup updates + grad_accumulation_steps: 1 # note: updates = steps / grad_accumulation_steps + max_grad_norm: 1.0 # gradient clipping + bnb_optimizer: False # use bnb 8bit AdamW optimizer or not + +model: + name: F5TTS_v1_Base # model name + tokenizer: pinyin # tokenizer type + tokenizer_path: null # if 'custom' tokenizer, define the path want to use (should be vocab.txt) + backbone: DiT + arch: + dim: 1024 + depth: 22 + heads: 16 + ff_mult: 2 + text_dim: 512 + text_mask_padding: True + qk_norm: null # null | rms_norm + conv_layers: 4 + pe_attn_head: null + checkpoint_activations: False # recompute activations and save memory for extra compute + mel_spec: + target_sample_rate: 24000 + n_mel_channels: 100 + hop_length: 256 + win_length: 1024 + n_fft: 1024 + mel_spec_type: vocos # vocos | bigvgan + vocoder: + is_local: False # use local offline ckpt or not + local_path: null # local vocoder path + +ckpts: + logger: wandb # wandb | tensorboard | null + log_samples: True # infer random sample per save checkpoint. wip, normal to fail with extra long samples + save_per_updates: 50000 # save checkpoint per updates + keep_last_n_checkpoints: -1 # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints + last_per_updates: 5000 # save last checkpoint per updates + save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name} \ No newline at end of file diff --git a/src/f5_tts/eval/README.md b/src/f5_tts/eval/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c33ef9285723b5034bb417c707c2d53007a3c0d6 --- /dev/null +++ b/src/f5_tts/eval/README.md @@ -0,0 +1,52 @@ + +# Evaluation + +Install packages for evaluation: + +```bash +pip install -e .[eval] +``` + +## Generating Samples for Evaluation + +### Prepare Test Datasets + +1. *Seed-TTS testset*: Download from [seed-tts-eval](https://github.com/BytedanceSpeech/seed-tts-eval). +2. *LibriSpeech test-clean*: Download from [OpenSLR](http://www.openslr.org/12/). +3. Unzip the downloaded datasets and place them in the `data/` directory. +4. Update the path for *LibriSpeech test-clean* data in `src/f5_tts/eval/eval_infer_batch.py` +5. 
Our filtered LibriSpeech-PC 4-10s subset: `data/librispeech_pc_test_clean_cross_sentence.lst` + +### Batch Inference for Test Set + +To run batch inference for evaluations, execute the following commands: + +```bash +# batch inference for evaluations +accelerate config # if not set before +bash src/f5_tts/eval/eval_infer_batch.sh +``` + +## Objective Evaluation on Generated Results + +### Download Evaluation Model Checkpoints + +1. Chinese ASR Model: [Paraformer-zh](https://huggingface.co/funasr/paraformer-zh) +2. English ASR Model: [Faster-Whisper](https://huggingface.co/Systran/faster-whisper-large-v3) +3. WavLM Model: Download from [Google Drive](https://drive.google.com/file/d/1-aE1NfzpRCLxA4GUxX9ITI3F9LlbtEGP/view). + +Then update in the following scripts with the paths you put evaluation model ckpts to. + +### Objective Evaluation + +Update the path with your batch-inferenced results, and carry out WER / SIM / UTMOS evaluations: +```bash +# Evaluation [WER] for Seed-TTS test [ZH] set +python src/f5_tts/eval/eval_seedtts_testset.py --eval_task wer --lang zh --gen_wav_dir --gpu_nums 8 + +# Evaluation [SIM] for LibriSpeech-PC test-clean (cross-sentence) +python src/f5_tts/eval/eval_librispeech_test_clean.py --eval_task sim --gen_wav_dir --librispeech_test_clean_path + +# Evaluation [UTMOS]. --ext: Audio extension +python src/f5_tts/eval/eval_utmos.py --audio_dir --ext wav +``` diff --git a/src/f5_tts/eval/ecapa_tdnn.py b/src/f5_tts/eval/ecapa_tdnn.py new file mode 100644 index 0000000000000000000000000000000000000000..6bc431eb9e2fc6173e6009ef3b0326a40618b1ec --- /dev/null +++ b/src/f5_tts/eval/ecapa_tdnn.py @@ -0,0 +1,330 @@ +# just for speaker similarity evaluation, third-party code + +# From https://github.com/microsoft/UniSpeech/blob/main/downstreams/speaker_verification/models/ +# part of the code is borrowed from https://github.com/lawlict/ECAPA-TDNN + +import os +import torch +import torch.nn as nn +import torch.nn.functional as F + + +""" Res2Conv1d + BatchNorm1d + ReLU +""" + + +class Res2Conv1dReluBn(nn.Module): + """ + in_channels == out_channels == channels + """ + + def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True, scale=4): + super().__init__() + assert channels % scale == 0, "{} % {} != 0".format(channels, scale) + self.scale = scale + self.width = channels // scale + self.nums = scale if scale == 1 else scale - 1 + + self.convs = [] + self.bns = [] + for i in range(self.nums): + self.convs.append(nn.Conv1d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias)) + self.bns.append(nn.BatchNorm1d(self.width)) + self.convs = nn.ModuleList(self.convs) + self.bns = nn.ModuleList(self.bns) + + def forward(self, x): + out = [] + spx = torch.split(x, self.width, 1) + for i in range(self.nums): + if i == 0: + sp = spx[i] + else: + sp = sp + spx[i] + # Order: conv -> relu -> bn + sp = self.convs[i](sp) + sp = self.bns[i](F.relu(sp)) + out.append(sp) + if self.scale != 1: + out.append(spx[self.nums]) + out = torch.cat(out, dim=1) + + return out + + +""" Conv1d + BatchNorm1d + ReLU +""" + + +class Conv1dReluBn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True): + super().__init__() + self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias) + self.bn = nn.BatchNorm1d(out_channels) + + def forward(self, x): + return self.bn(F.relu(self.conv(x))) + + +""" The SE connection of 1D case. 
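+Squeeze-and-Excitation gating: average-pool over time, pass through a bottleneck
+Linear -> ReLU -> Linear -> sigmoid, and rescale each channel of the input accordingly.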
+""" + + +class SE_Connect(nn.Module): + def __init__(self, channels, se_bottleneck_dim=128): + super().__init__() + self.linear1 = nn.Linear(channels, se_bottleneck_dim) + self.linear2 = nn.Linear(se_bottleneck_dim, channels) + + def forward(self, x): + out = x.mean(dim=2) + out = F.relu(self.linear1(out)) + out = torch.sigmoid(self.linear2(out)) + out = x * out.unsqueeze(2) + + return out + + +""" SE-Res2Block of the ECAPA-TDNN architecture. +""" + +# def SE_Res2Block(channels, kernel_size, stride, padding, dilation, scale): +# return nn.Sequential( +# Conv1dReluBn(channels, 512, kernel_size=1, stride=1, padding=0), +# Res2Conv1dReluBn(512, kernel_size, stride, padding, dilation, scale=scale), +# Conv1dReluBn(512, channels, kernel_size=1, stride=1, padding=0), +# SE_Connect(channels) +# ) + + +class SE_Res2Block(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, scale, se_bottleneck_dim): + super().__init__() + self.Conv1dReluBn1 = Conv1dReluBn(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.Res2Conv1dReluBn = Res2Conv1dReluBn(out_channels, kernel_size, stride, padding, dilation, scale=scale) + self.Conv1dReluBn2 = Conv1dReluBn(out_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.SE_Connect = SE_Connect(out_channels, se_bottleneck_dim) + + self.shortcut = None + if in_channels != out_channels: + self.shortcut = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + ) + + def forward(self, x): + residual = x + if self.shortcut: + residual = self.shortcut(x) + + x = self.Conv1dReluBn1(x) + x = self.Res2Conv1dReluBn(x) + x = self.Conv1dReluBn2(x) + x = self.SE_Connect(x) + + return x + residual + + +""" Attentive weighted mean and standard deviation pooling. +""" + + +class AttentiveStatsPool(nn.Module): + def __init__(self, in_dim, attention_channels=128, global_context_att=False): + super().__init__() + self.global_context_att = global_context_att + + # Use Conv1d with stride == 1 rather than Linear, then we don't need to transpose inputs. + if global_context_att: + self.linear1 = nn.Conv1d(in_dim * 3, attention_channels, kernel_size=1) # equals W and b in the paper + else: + self.linear1 = nn.Conv1d(in_dim, attention_channels, kernel_size=1) # equals W and b in the paper + self.linear2 = nn.Conv1d(attention_channels, in_dim, kernel_size=1) # equals V and k in the paper + + def forward(self, x): + if self.global_context_att: + context_mean = torch.mean(x, dim=-1, keepdim=True).expand_as(x) + context_std = torch.sqrt(torch.var(x, dim=-1, keepdim=True) + 1e-10).expand_as(x) + x_in = torch.cat((x, context_mean, context_std), dim=1) + else: + x_in = x + + # DON'T use ReLU here! In experiments, I find ReLU hard to converge. 
+ alpha = torch.tanh(self.linear1(x_in)) + # alpha = F.relu(self.linear1(x_in)) + alpha = torch.softmax(self.linear2(alpha), dim=2) + mean = torch.sum(alpha * x, dim=2) + residuals = torch.sum(alpha * (x**2), dim=2) - mean**2 + std = torch.sqrt(residuals.clamp(min=1e-9)) + return torch.cat([mean, std], dim=1) + + +class ECAPA_TDNN(nn.Module): + def __init__( + self, + feat_dim=80, + channels=512, + emb_dim=192, + global_context_att=False, + feat_type="wavlm_large", + sr=16000, + feature_selection="hidden_states", + update_extract=False, + config_path=None, + ): + super().__init__() + + self.feat_type = feat_type + self.feature_selection = feature_selection + self.update_extract = update_extract + self.sr = sr + + torch.hub._validate_not_a_forked_repo = lambda a, b, c: True + try: + local_s3prl_path = os.path.expanduser("~/.cache/torch/hub/s3prl_s3prl_main") + self.feature_extract = torch.hub.load(local_s3prl_path, feat_type, source="local", config_path=config_path) + except: # noqa: E722 + self.feature_extract = torch.hub.load("s3prl/s3prl", feat_type) + + if len(self.feature_extract.model.encoder.layers) == 24 and hasattr( + self.feature_extract.model.encoder.layers[23].self_attn, "fp32_attention" + ): + self.feature_extract.model.encoder.layers[23].self_attn.fp32_attention = False + if len(self.feature_extract.model.encoder.layers) == 24 and hasattr( + self.feature_extract.model.encoder.layers[11].self_attn, "fp32_attention" + ): + self.feature_extract.model.encoder.layers[11].self_attn.fp32_attention = False + + self.feat_num = self.get_feat_num() + self.feature_weight = nn.Parameter(torch.zeros(self.feat_num)) + + if feat_type != "fbank" and feat_type != "mfcc": + freeze_list = ["final_proj", "label_embs_concat", "mask_emb", "project_q", "quantizer"] + for name, param in self.feature_extract.named_parameters(): + for freeze_val in freeze_list: + if freeze_val in name: + param.requires_grad = False + break + + if not self.update_extract: + for param in self.feature_extract.parameters(): + param.requires_grad = False + + self.instance_norm = nn.InstanceNorm1d(feat_dim) + # self.channels = [channels] * 4 + [channels * 3] + self.channels = [channels] * 4 + [1536] + + self.layer1 = Conv1dReluBn(feat_dim, self.channels[0], kernel_size=5, padding=2) + self.layer2 = SE_Res2Block( + self.channels[0], + self.channels[1], + kernel_size=3, + stride=1, + padding=2, + dilation=2, + scale=8, + se_bottleneck_dim=128, + ) + self.layer3 = SE_Res2Block( + self.channels[1], + self.channels[2], + kernel_size=3, + stride=1, + padding=3, + dilation=3, + scale=8, + se_bottleneck_dim=128, + ) + self.layer4 = SE_Res2Block( + self.channels[2], + self.channels[3], + kernel_size=3, + stride=1, + padding=4, + dilation=4, + scale=8, + se_bottleneck_dim=128, + ) + + # self.conv = nn.Conv1d(self.channels[-1], self.channels[-1], kernel_size=1) + cat_channels = channels * 3 + self.conv = nn.Conv1d(cat_channels, self.channels[-1], kernel_size=1) + self.pooling = AttentiveStatsPool( + self.channels[-1], attention_channels=128, global_context_att=global_context_att + ) + self.bn = nn.BatchNorm1d(self.channels[-1] * 2) + self.linear = nn.Linear(self.channels[-1] * 2, emb_dim) + + def get_feat_num(self): + self.feature_extract.eval() + wav = [torch.randn(self.sr).to(next(self.feature_extract.parameters()).device)] + with torch.no_grad(): + features = self.feature_extract(wav) + select_feature = features[self.feature_selection] + if isinstance(select_feature, (list, tuple)): + return len(select_feature) + else: + return 1 + + 
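+    # Extract frame-level features from the upstream model. For SSL front-ends
+    # (e.g. wavlm_large hidden states) the per-layer outputs are combined with the
+    # learned softmax weights in self.feature_weight before instance normalization.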
def get_feat(self, x): + if self.update_extract: + x = self.feature_extract([sample for sample in x]) + else: + with torch.no_grad(): + if self.feat_type == "fbank" or self.feat_type == "mfcc": + x = self.feature_extract(x) + 1e-6 # B x feat_dim x time_len + else: + x = self.feature_extract([sample for sample in x]) + + if self.feat_type == "fbank": + x = x.log() + + if self.feat_type != "fbank" and self.feat_type != "mfcc": + x = x[self.feature_selection] + if isinstance(x, (list, tuple)): + x = torch.stack(x, dim=0) + else: + x = x.unsqueeze(0) + norm_weights = F.softmax(self.feature_weight, dim=-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) + x = (norm_weights * x).sum(dim=0) + x = torch.transpose(x, 1, 2) + 1e-6 + + x = self.instance_norm(x) + return x + + def forward(self, x): + x = self.get_feat(x) + + out1 = self.layer1(x) + out2 = self.layer2(out1) + out3 = self.layer3(out2) + out4 = self.layer4(out3) + + out = torch.cat([out2, out3, out4], dim=1) + out = F.relu(self.conv(out)) + out = self.bn(self.pooling(out)) + out = self.linear(out) + + return out + + +def ECAPA_TDNN_SMALL( + feat_dim, + emb_dim=256, + feat_type="wavlm_large", + sr=16000, + feature_selection="hidden_states", + update_extract=False, + config_path=None, +): + return ECAPA_TDNN( + feat_dim=feat_dim, + channels=512, + emb_dim=emb_dim, + feat_type=feat_type, + sr=sr, + feature_selection=feature_selection, + update_extract=update_extract, + config_path=config_path, + ) diff --git a/src/f5_tts/eval/eval_infer_batch.py b/src/f5_tts/eval/eval_infer_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..e779ff0703c1d6febef273fc1bae6e2ec2c2b266 --- /dev/null +++ b/src/f5_tts/eval/eval_infer_batch.py @@ -0,0 +1,202 @@ +import os +import sys + +sys.path.append(os.getcwd()) + +import argparse +import time +from importlib.resources import files + +import torch +import torchaudio +from accelerate import Accelerator +from omegaconf import OmegaConf +from tqdm import tqdm + +from f5_tts.eval.utils_eval import ( + get_inference_prompt, + get_librispeech_test_clean_metainfo, + get_seedtts_testset_metainfo, +) +from f5_tts.infer.utils_infer import load_checkpoint, load_vocoder +from f5_tts.model import CFM, DiT, UNetT # noqa: F401. used for config +from f5_tts.model.utils import get_tokenizer + +accelerator = Accelerator() +device = f"cuda:{accelerator.process_index}" + + +use_ema = True +target_rms = 0.1 + + +rel_path = str(files("f5_tts").joinpath("../../")) + + +def main(): + parser = argparse.ArgumentParser(description="batch inference") + + parser.add_argument("-s", "--seed", default=None, type=int) + parser.add_argument("-n", "--expname", required=True) + parser.add_argument("-c", "--ckptstep", default=1250000, type=int) + + parser.add_argument("-nfe", "--nfestep", default=32, type=int) + parser.add_argument("-o", "--odemethod", default="euler") + parser.add_argument("-ss", "--swaysampling", default=-1, type=float) + + parser.add_argument("-t", "--testset", required=True) + + args = parser.parse_args() + + seed = args.seed + exp_name = args.expname + ckpt_step = args.ckptstep + + nfe_step = args.nfestep + ode_method = args.odemethod + sway_sampling_coef = args.swaysampling + + testset = args.testset + + infer_batch_size = 1 # max frames. 
1 for ddp single inference (recommended) + cfg_strength = 2.0 + speed = 1.0 + use_truth_duration = False + no_ref_audio = False + + model_cfg = OmegaConf.load(str(files("f5_tts").joinpath(f"configs/{exp_name}.yaml"))) + model_cls = globals()[model_cfg.model.backbone] + model_arc = model_cfg.model.arch + + dataset_name = model_cfg.datasets.name + tokenizer = model_cfg.model.tokenizer + + mel_spec_type = model_cfg.model.mel_spec.mel_spec_type + target_sample_rate = model_cfg.model.mel_spec.target_sample_rate + n_mel_channels = model_cfg.model.mel_spec.n_mel_channels + hop_length = model_cfg.model.mel_spec.hop_length + win_length = model_cfg.model.mel_spec.win_length + n_fft = model_cfg.model.mel_spec.n_fft + + if testset == "ls_pc_test_clean": + metalst = rel_path + "/data/librispeech_pc_test_clean_cross_sentence.lst" + librispeech_test_clean_path = "/LibriSpeech/test-clean" # test-clean path + metainfo = get_librispeech_test_clean_metainfo(metalst, librispeech_test_clean_path) + + elif testset == "seedtts_test_zh": + metalst = rel_path + "/data/seedtts_testset/zh/meta.lst" + metainfo = get_seedtts_testset_metainfo(metalst) + + elif testset == "seedtts_test_en": + metalst = rel_path + "/data/seedtts_testset/en/meta.lst" + metainfo = get_seedtts_testset_metainfo(metalst) + + # path to save genereted wavs + output_dir = ( + f"{rel_path}/" + f"results/{exp_name}_{ckpt_step}/{testset}/" + f"seed{seed}_{ode_method}_nfe{nfe_step}_{mel_spec_type}" + f"{f'_ss{sway_sampling_coef}' if sway_sampling_coef else ''}" + f"_cfg{cfg_strength}_speed{speed}" + f"{'_gt-dur' if use_truth_duration else ''}" + f"{'_no-ref-audio' if no_ref_audio else ''}" + ) + + # -------------------------------------------------# + + prompts_all = get_inference_prompt( + metainfo, + speed=speed, + tokenizer=tokenizer, + target_sample_rate=target_sample_rate, + n_mel_channels=n_mel_channels, + hop_length=hop_length, + mel_spec_type=mel_spec_type, + target_rms=target_rms, + use_truth_duration=use_truth_duration, + infer_batch_size=infer_batch_size, + ) + + # Vocoder model + local = False + if mel_spec_type == "vocos": + vocoder_local_path = "../checkpoints/charactr/vocos-mel-24khz" + elif mel_spec_type == "bigvgan": + vocoder_local_path = "../checkpoints/bigvgan_v2_24khz_100band_256x" + vocoder = load_vocoder(vocoder_name=mel_spec_type, is_local=local, local_path=vocoder_local_path) + + # Tokenizer + vocab_char_map, vocab_size = get_tokenizer(dataset_name, tokenizer) + + # Model + model = CFM( + transformer=model_cls(**model_arc, text_num_embeds=vocab_size, mel_dim=n_mel_channels), + mel_spec_kwargs=dict( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + n_mel_channels=n_mel_channels, + target_sample_rate=target_sample_rate, + mel_spec_type=mel_spec_type, + ), + odeint_kwargs=dict( + method=ode_method, + ), + vocab_char_map=vocab_char_map, + ).to(device) + + ckpt_path = rel_path + f"/ckpts/{exp_name}/model_{ckpt_step}.pt" + if not os.path.exists(ckpt_path): + print("Loading from self-organized training checkpoints rather than released pretrained.") + ckpt_path = rel_path + f"/{model_cfg.ckpts.save_dir}/model_{ckpt_step}.pt" + dtype = torch.float32 if mel_spec_type == "bigvgan" else None + model = load_checkpoint(model, ckpt_path, device, dtype=dtype, use_ema=use_ema) + + if not os.path.exists(output_dir) and accelerator.is_main_process: + os.makedirs(output_dir) + + # start batch inference + accelerator.wait_for_everyone() + start = time.time() + + with accelerator.split_between_processes(prompts_all) as prompts: + 
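+        # each process handles its own shard of the bucketed prompts; generated audio is
+        # written per utterance below, so no explicit gather of results is needed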
for prompt in tqdm(prompts, disable=not accelerator.is_local_main_process): + utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, final_text_list = prompt + ref_mels = ref_mels.to(device) + ref_mel_lens = torch.tensor(ref_mel_lens, dtype=torch.long).to(device) + total_mel_lens = torch.tensor(total_mel_lens, dtype=torch.long).to(device) + + # Inference + with torch.inference_mode(): + generated, _ = model.sample( + cond=ref_mels, + text=final_text_list, + duration=total_mel_lens, + lens=ref_mel_lens, + steps=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + no_ref_audio=no_ref_audio, + seed=seed, + ) + # Final result + for i, gen in enumerate(generated): + gen = gen[ref_mel_lens[i] : total_mel_lens[i], :].unsqueeze(0) + gen_mel_spec = gen.permute(0, 2, 1).to(torch.float32) + if mel_spec_type == "vocos": + generated_wave = vocoder.decode(gen_mel_spec).cpu() + elif mel_spec_type == "bigvgan": + generated_wave = vocoder(gen_mel_spec).squeeze(0).cpu() + + if ref_rms_list[i] < target_rms: + generated_wave = generated_wave * ref_rms_list[i] / target_rms + torchaudio.save(f"{output_dir}/{utts[i]}.wav", generated_wave, target_sample_rate) + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + timediff = time.time() - start + print(f"Done batch inference in {timediff / 60 :.2f} minutes.") + + +if __name__ == "__main__": + main() diff --git a/src/f5_tts/eval/eval_infer_batch.sh b/src/f5_tts/eval/eval_infer_batch.sh new file mode 100644 index 0000000000000000000000000000000000000000..a5b4f631eaa7df754a5a422d42657d2cd18acfea --- /dev/null +++ b/src/f5_tts/eval/eval_infer_batch.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# e.g. F5-TTS, 16 NFE +accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_v1_Base" -t "seedtts_test_zh" -nfe 16 +accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_v1_Base" -t "seedtts_test_en" -nfe 16 +accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_v1_Base" -t "ls_pc_test_clean" -nfe 16 + +# e.g. Vanilla E2 TTS, 32 NFE +accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -c 1200000 -t "seedtts_test_zh" -o "midpoint" -ss 0 +accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -c 1200000 -t "seedtts_test_en" -o "midpoint" -ss 0 +accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -c 1200000 -t "ls_pc_test_clean" -o "midpoint" -ss 0 + +# e.g. evaluate F5-TTS 16 NFE result on Seed-TTS test-zh +python src/f5_tts/eval/eval_seedtts_testset.py -e wer -l zh --gen_wav_dir results/F5TTS_v1_Base_1250000/seedtts_test_zh/seed0_euler_nfe32_vocos_ss-1_cfg2.0_speed1.0 --gpu_nums 8 +python src/f5_tts/eval/eval_seedtts_testset.py -e sim -l zh --gen_wav_dir results/F5TTS_v1_Base_1250000/seedtts_test_zh/seed0_euler_nfe32_vocos_ss-1_cfg2.0_speed1.0 --gpu_nums 8 +python src/f5_tts/eval/eval_utmos.py --audio_dir results/F5TTS_v1_Base_1250000/seedtts_test_zh/seed0_euler_nfe32_vocos_ss-1_cfg2.0_speed1.0 + +# etc. 
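+
+# e.g. evaluate a self-trained run (the step number below is only illustrative):
+# eval_infer_batch.py falls back to the training save_dir of the matching config
+# if no released checkpoint is found under ckpts/<expname>/model_<step>.pt
+# accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_Small" -c 600000 -t "seedtts_test_en" -nfe 32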
diff --git a/src/f5_tts/eval/eval_librispeech_test_clean.py b/src/f5_tts/eval/eval_librispeech_test_clean.py new file mode 100644 index 0000000000000000000000000000000000000000..0b403689b02c250e0882e946f029f93ec8739292 --- /dev/null +++ b/src/f5_tts/eval/eval_librispeech_test_clean.py @@ -0,0 +1,90 @@ +# Evaluate with Librispeech test-clean, ~3s prompt to generate 4-10s audio (the way of valle/voicebox evaluation) + +import argparse +import json +import os +import sys + +sys.path.append(os.getcwd()) + +import multiprocessing as mp +from importlib.resources import files + +import numpy as np +from f5_tts.eval.utils_eval import ( + get_librispeech_test, + run_asr_wer, + run_sim, +) + +rel_path = str(files("f5_tts").joinpath("../../")) + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("-e", "--eval_task", type=str, default="wer", choices=["sim", "wer"]) + parser.add_argument("-l", "--lang", type=str, default="en") + parser.add_argument("-g", "--gen_wav_dir", type=str, required=True) + parser.add_argument("-p", "--librispeech_test_clean_path", type=str, required=True) + parser.add_argument("-n", "--gpu_nums", type=int, default=8, help="Number of GPUs to use") + parser.add_argument("--local", action="store_true", help="Use local custom checkpoint directory") + return parser.parse_args() + + +def main(): + args = get_args() + eval_task = args.eval_task + lang = args.lang + librispeech_test_clean_path = args.librispeech_test_clean_path # test-clean path + gen_wav_dir = args.gen_wav_dir + metalst = rel_path + "/data/librispeech_pc_test_clean_cross_sentence.lst" + + gpus = list(range(args.gpu_nums)) + test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path) + + ## In LibriSpeech, some speakers utilized varying voice characteristics for different characters in the book, + ## leading to a low similarity for the ground truth in some cases. 
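+    ## Uncomment the call below to score the ground-truth recordings instead of the generated wavs.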
+ # test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth = True) # eval ground truth + + local = args.local + if local: # use local custom checkpoint dir + asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3" + else: + asr_ckpt_dir = "" # auto download to cache dir + wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth" + + # -------------------------------------------------------------------------- + + full_results = [] + metrics = [] + + if eval_task == "wer": + with mp.Pool(processes=len(gpus)) as pool: + args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set] + results = pool.map(run_asr_wer, args) + for r in results: + full_results.extend(r) + elif eval_task == "sim": + with mp.Pool(processes=len(gpus)) as pool: + args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set] + results = pool.map(run_sim, args) + for r in results: + full_results.extend(r) + else: + raise ValueError(f"Unknown metric type: {eval_task}") + + result_path = f"{gen_wav_dir}/_{eval_task}_results.jsonl" + with open(result_path, "w") as f: + for line in full_results: + metrics.append(line[eval_task]) + f.write(json.dumps(line, ensure_ascii=False) + "\n") + metric = round(np.mean(metrics), 5) + f.write(f"\n{eval_task.upper()}: {metric}\n") + + print(f"\nTotal {len(metrics)} samples") + print(f"{eval_task.upper()}: {metric}") + print(f"{eval_task.upper()} results saved to {result_path}") + + +if __name__ == "__main__": + main() diff --git a/src/f5_tts/eval/eval_seedtts_testset.py b/src/f5_tts/eval/eval_seedtts_testset.py new file mode 100644 index 0000000000000000000000000000000000000000..0bb68eeab3c018388aff6bf225ce0efa9d7acae1 --- /dev/null +++ b/src/f5_tts/eval/eval_seedtts_testset.py @@ -0,0 +1,89 @@ +# Evaluate with Seed-TTS testset + +import argparse +import json +import os +import sys + +sys.path.append(os.getcwd()) + +import multiprocessing as mp +from importlib.resources import files + +import numpy as np +from f5_tts.eval.utils_eval import ( + get_seed_tts_test, + run_asr_wer, + run_sim, +) + +rel_path = str(files("f5_tts").joinpath("../../")) + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("-e", "--eval_task", type=str, default="wer", choices=["sim", "wer"]) + parser.add_argument("-l", "--lang", type=str, default="en", choices=["zh", "en"]) + parser.add_argument("-g", "--gen_wav_dir", type=str, required=True) + parser.add_argument("-n", "--gpu_nums", type=int, default=8, help="Number of GPUs to use") + parser.add_argument("--local", action="store_true", help="Use local custom checkpoint directory") + return parser.parse_args() + + +def main(): + args = get_args() + eval_task = args.eval_task + lang = args.lang + gen_wav_dir = args.gen_wav_dir + metalst = rel_path + f"/data/seedtts_testset/{lang}/meta.lst" # seed-tts testset + + # NOTE. 
paraformer-zh result will be slightly different according to the number of gpus, cuz batchsize is different + # zh 1.254 seems a result of 4 workers wer_seed_tts + gpus = list(range(args.gpu_nums)) + test_set = get_seed_tts_test(metalst, gen_wav_dir, gpus) + + local = args.local + if local: # use local custom checkpoint dir + if lang == "zh": + asr_ckpt_dir = "../checkpoints/funasr" # paraformer-zh dir under funasr + elif lang == "en": + asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3" + else: + asr_ckpt_dir = "" # auto download to cache dir + wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth" + + # -------------------------------------------------------------------------- + + full_results = [] + metrics = [] + + if eval_task == "wer": + with mp.Pool(processes=len(gpus)) as pool: + args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set] + results = pool.map(run_asr_wer, args) + for r in results: + full_results.extend(r) + elif eval_task == "sim": + with mp.Pool(processes=len(gpus)) as pool: + args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set] + results = pool.map(run_sim, args) + for r in results: + full_results.extend(r) + else: + raise ValueError(f"Unknown metric type: {eval_task}") + + result_path = f"{gen_wav_dir}/_{eval_task}_results.jsonl" + with open(result_path, "w") as f: + for line in full_results: + metrics.append(line[eval_task]) + f.write(json.dumps(line, ensure_ascii=False) + "\n") + metric = round(np.mean(metrics), 5) + f.write(f"\n{eval_task.upper()}: {metric}\n") + + print(f"\nTotal {len(metrics)} samples") + print(f"{eval_task.upper()}: {metric}") + print(f"{eval_task.upper()} results saved to {result_path}") + + +if __name__ == "__main__": + main() diff --git a/src/f5_tts/eval/eval_utmos.py b/src/f5_tts/eval/eval_utmos.py new file mode 100644 index 0000000000000000000000000000000000000000..b6166e8ab073a6134b23936e15e440332991bab2 --- /dev/null +++ b/src/f5_tts/eval/eval_utmos.py @@ -0,0 +1,42 @@ +import argparse +import json +from pathlib import Path + +import librosa +import torch +from tqdm import tqdm + + +def main(): + parser = argparse.ArgumentParser(description="UTMOS Evaluation") + parser.add_argument("--audio_dir", type=str, required=True, help="Audio file path.") + parser.add_argument("--ext", type=str, default="wav", help="Audio extension.") + args = parser.parse_args() + + device = "cuda" if torch.cuda.is_available() else "xpu" if torch.xpu.is_available() else "cpu" + + predictor = torch.hub.load("tarepan/SpeechMOS:v1.2.0", "utmos22_strong", trust_repo=True) + predictor = predictor.to(device) + + audio_paths = list(Path(args.audio_dir).rglob(f"*.{args.ext}")) + utmos_score = 0 + + utmos_result_path = Path(args.audio_dir) / "_utmos_results.jsonl" + with open(utmos_result_path, "w", encoding="utf-8") as f: + for audio_path in tqdm(audio_paths, desc="Processing"): + wav, sr = librosa.load(audio_path, sr=None, mono=True) + wav_tensor = torch.from_numpy(wav).to(device).unsqueeze(0) + score = predictor(wav_tensor, sr) + line = {} + line["wav"], line["utmos"] = str(audio_path.stem), score.item() + utmos_score += score.item() + f.write(json.dumps(line, ensure_ascii=False) + "\n") + avg_score = utmos_score / len(audio_paths) if len(audio_paths) > 0 else 0 + f.write(f"\nUTMOS: {avg_score:.4f}\n") + + print(f"UTMOS: {avg_score:.4f}") + print(f"UTMOS results saved to {utmos_result_path}") + + +if __name__ == "__main__": + main() diff --git a/src/f5_tts/eval/utils_eval.py 
b/src/f5_tts/eval/utils_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..d8407adb82e765632fb939b3afc4f63a750869b7 --- /dev/null +++ b/src/f5_tts/eval/utils_eval.py @@ -0,0 +1,418 @@ +import math +import os +import random +import string +from pathlib import Path + +import torch +import torch.nn.functional as F +import torchaudio +from tqdm import tqdm + +from f5_tts.eval.ecapa_tdnn import ECAPA_TDNN_SMALL +from f5_tts.model.modules import MelSpec +from f5_tts.model.utils import convert_char_to_pinyin + + +# seedtts testset metainfo: utt, prompt_text, prompt_wav, gt_text, gt_wav +def get_seedtts_testset_metainfo(metalst): + f = open(metalst) + lines = f.readlines() + f.close() + metainfo = [] + for line in lines: + if len(line.strip().split("|")) == 5: + utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split("|") + elif len(line.strip().split("|")) == 4: + utt, prompt_text, prompt_wav, gt_text = line.strip().split("|") + gt_wav = os.path.join(os.path.dirname(metalst), "wavs", utt + ".wav") + if not os.path.isabs(prompt_wav): + prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav) + metainfo.append((utt, prompt_text, prompt_wav, gt_text, gt_wav)) + return metainfo + + +# librispeech test-clean metainfo: gen_utt, ref_txt, ref_wav, gen_txt, gen_wav +def get_librispeech_test_clean_metainfo(metalst, librispeech_test_clean_path): + f = open(metalst) + lines = f.readlines() + f.close() + metainfo = [] + for line in lines: + ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split("\t") + + # ref_txt = ref_txt[0] + ref_txt[1:].lower() + '.' # if use librispeech test-clean (no-pc) + ref_spk_id, ref_chaptr_id, _ = ref_utt.split("-") + ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + ".flac") + + # gen_txt = gen_txt[0] + gen_txt[1:].lower() + '.' 
# if use librispeech test-clean (no-pc) + gen_spk_id, gen_chaptr_id, _ = gen_utt.split("-") + gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + ".flac") + + metainfo.append((gen_utt, ref_txt, ref_wav, " " + gen_txt, gen_wav)) + + return metainfo + + +# padded to max length mel batch +def padded_mel_batch(ref_mels): + max_mel_length = torch.LongTensor([mel.shape[-1] for mel in ref_mels]).amax() + padded_ref_mels = [] + for mel in ref_mels: + padded_ref_mel = F.pad(mel, (0, max_mel_length - mel.shape[-1]), value=0) + padded_ref_mels.append(padded_ref_mel) + padded_ref_mels = torch.stack(padded_ref_mels) + padded_ref_mels = padded_ref_mels.permute(0, 2, 1) + return padded_ref_mels + + +# get prompts from metainfo containing: utt, prompt_text, prompt_wav, gt_text, gt_wav + + +def get_inference_prompt( + metainfo, + speed=1.0, + tokenizer="pinyin", + polyphone=True, + target_sample_rate=24000, + n_fft=1024, + win_length=1024, + n_mel_channels=100, + hop_length=256, + mel_spec_type="vocos", + target_rms=0.1, + use_truth_duration=False, + infer_batch_size=1, + num_buckets=200, + min_secs=3, + max_secs=40, +): + prompts_all = [] + + min_tokens = min_secs * target_sample_rate // hop_length + max_tokens = max_secs * target_sample_rate // hop_length + + batch_accum = [0] * num_buckets + utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, final_text_list = ( + [[] for _ in range(num_buckets)] for _ in range(6) + ) + + mel_spectrogram = MelSpec( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + n_mel_channels=n_mel_channels, + target_sample_rate=target_sample_rate, + mel_spec_type=mel_spec_type, + ) + + for utt, prompt_text, prompt_wav, gt_text, gt_wav in tqdm(metainfo, desc="Processing prompts..."): + # Audio + ref_audio, ref_sr = torchaudio.load(prompt_wav) + ref_rms = torch.sqrt(torch.mean(torch.square(ref_audio))) + if ref_rms < target_rms: + ref_audio = ref_audio * target_rms / ref_rms + assert ref_audio.shape[-1] > 5000, f"Empty prompt wav: {prompt_wav}, or torchaudio backend issue." + if ref_sr != target_sample_rate: + resampler = torchaudio.transforms.Resample(ref_sr, target_sample_rate) + ref_audio = resampler(ref_audio) + + # Text + if len(prompt_text[-1].encode("utf-8")) == 1: + prompt_text = prompt_text + " " + text = [prompt_text + gt_text] + if tokenizer == "pinyin": + text_list = convert_char_to_pinyin(text, polyphone=polyphone) + else: + text_list = text + + # Duration, mel frame length + ref_mel_len = ref_audio.shape[-1] // hop_length + if use_truth_duration: + gt_audio, gt_sr = torchaudio.load(gt_wav) + if gt_sr != target_sample_rate: + resampler = torchaudio.transforms.Resample(gt_sr, target_sample_rate) + gt_audio = resampler(gt_audio) + total_mel_len = ref_mel_len + int(gt_audio.shape[-1] / hop_length / speed) + + # # test vocoder resynthesis + # ref_audio = gt_audio + else: + ref_text_len = len(prompt_text.encode("utf-8")) + gen_text_len = len(gt_text.encode("utf-8")) + total_mel_len = ref_mel_len + int(ref_mel_len / ref_text_len * gen_text_len / speed) + + # to mel spectrogram + ref_mel = mel_spectrogram(ref_audio) + ref_mel = ref_mel.squeeze(0) + + # deal with batch + assert infer_batch_size > 0, "infer_batch_size should be greater than 0." + assert ( + min_tokens <= total_mel_len <= max_tokens + ), f"Audio {utt} has duration {total_mel_len*hop_length//target_sample_rate}s out of range [{min_secs}, {max_secs}]." 
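+        # bucket utterances by total mel length so each batch groups similar durations;
+        # a bucket is flushed into prompts_all once its accumulated frames reach infer_batch_size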
+ bucket_i = math.floor((total_mel_len - min_tokens) / (max_tokens - min_tokens + 1) * num_buckets) + + utts[bucket_i].append(utt) + ref_rms_list[bucket_i].append(ref_rms) + ref_mels[bucket_i].append(ref_mel) + ref_mel_lens[bucket_i].append(ref_mel_len) + total_mel_lens[bucket_i].append(total_mel_len) + final_text_list[bucket_i].extend(text_list) + + batch_accum[bucket_i] += total_mel_len + + if batch_accum[bucket_i] >= infer_batch_size: + # print(f"\n{len(ref_mels[bucket_i][0][0])}\n{ref_mel_lens[bucket_i]}\n{total_mel_lens[bucket_i]}") + prompts_all.append( + ( + utts[bucket_i], + ref_rms_list[bucket_i], + padded_mel_batch(ref_mels[bucket_i]), + ref_mel_lens[bucket_i], + total_mel_lens[bucket_i], + final_text_list[bucket_i], + ) + ) + batch_accum[bucket_i] = 0 + ( + utts[bucket_i], + ref_rms_list[bucket_i], + ref_mels[bucket_i], + ref_mel_lens[bucket_i], + total_mel_lens[bucket_i], + final_text_list[bucket_i], + ) = [], [], [], [], [], [] + + # add residual + for bucket_i, bucket_frames in enumerate(batch_accum): + if bucket_frames > 0: + prompts_all.append( + ( + utts[bucket_i], + ref_rms_list[bucket_i], + padded_mel_batch(ref_mels[bucket_i]), + ref_mel_lens[bucket_i], + total_mel_lens[bucket_i], + final_text_list[bucket_i], + ) + ) + # not only leave easy work for last workers + random.seed(666) + random.shuffle(prompts_all) + + return prompts_all + + +# get wav_res_ref_text of seed-tts test metalst +# https://github.com/BytedanceSpeech/seed-tts-eval + + +def get_seed_tts_test(metalst, gen_wav_dir, gpus): + f = open(metalst) + lines = f.readlines() + f.close() + + test_set_ = [] + for line in tqdm(lines): + if len(line.strip().split("|")) == 5: + utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split("|") + elif len(line.strip().split("|")) == 4: + utt, prompt_text, prompt_wav, gt_text = line.strip().split("|") + + if not os.path.exists(os.path.join(gen_wav_dir, utt + ".wav")): + continue + gen_wav = os.path.join(gen_wav_dir, utt + ".wav") + if not os.path.isabs(prompt_wav): + prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav) + + test_set_.append((gen_wav, prompt_wav, gt_text)) + + num_jobs = len(gpus) + if num_jobs == 1: + return [(gpus[0], test_set_)] + + wav_per_job = len(test_set_) // num_jobs + 1 + test_set = [] + for i in range(num_jobs): + test_set.append((gpus[i], test_set_[i * wav_per_job : (i + 1) * wav_per_job])) + + return test_set + + +# get librispeech test-clean cross sentence test + + +def get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth=False): + f = open(metalst) + lines = f.readlines() + f.close() + + test_set_ = [] + for line in tqdm(lines): + ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split("\t") + + if eval_ground_truth: + gen_spk_id, gen_chaptr_id, _ = gen_utt.split("-") + gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + ".flac") + else: + if not os.path.exists(os.path.join(gen_wav_dir, gen_utt + ".wav")): + raise FileNotFoundError(f"Generated wav not found: {gen_utt}") + gen_wav = os.path.join(gen_wav_dir, gen_utt + ".wav") + + ref_spk_id, ref_chaptr_id, _ = ref_utt.split("-") + ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + ".flac") + + test_set_.append((gen_wav, ref_wav, gen_txt)) + + num_jobs = len(gpus) + if num_jobs == 1: + return [(gpus[0], test_set_)] + + wav_per_job = len(test_set_) // num_jobs + 1 + test_set = [] + for i in range(num_jobs): + test_set.append((gpus[i], 
test_set_[i * wav_per_job : (i + 1) * wav_per_job])) + + return test_set + + +# load asr model + + +def load_asr_model(lang, ckpt_dir=""): + if lang == "zh": + from funasr import AutoModel + + model = AutoModel( + model=os.path.join(ckpt_dir, "paraformer-zh"), + # vad_model = os.path.join(ckpt_dir, "fsmn-vad"), + # punc_model = os.path.join(ckpt_dir, "ct-punc"), + # spk_model = os.path.join(ckpt_dir, "cam++"), + disable_update=True, + ) # following seed-tts setting + elif lang == "en": + from faster_whisper import WhisperModel + + model_size = "large-v3" if ckpt_dir == "" else ckpt_dir + model = WhisperModel(model_size, device="cuda", compute_type="float16") + return model + + +# WER Evaluation, the way Seed-TTS does + + +def run_asr_wer(args): + rank, lang, test_set, ckpt_dir = args + + if lang == "zh": + import zhconv + + torch.cuda.set_device(rank) + elif lang == "en": + os.environ["CUDA_VISIBLE_DEVICES"] = str(rank) + else: + raise NotImplementedError( + "lang support only 'zh' (funasr paraformer-zh), 'en' (faster-whisper-large-v3), for now." + ) + + asr_model = load_asr_model(lang, ckpt_dir=ckpt_dir) + + from zhon.hanzi import punctuation + + punctuation_all = punctuation + string.punctuation + wer_results = [] + + from jiwer import compute_measures + + for gen_wav, prompt_wav, truth in tqdm(test_set): + if lang == "zh": + res = asr_model.generate(input=gen_wav, batch_size_s=300, disable_pbar=True) + hypo = res[0]["text"] + hypo = zhconv.convert(hypo, "zh-cn") + elif lang == "en": + segments, _ = asr_model.transcribe(gen_wav, beam_size=5, language="en") + hypo = "" + for segment in segments: + hypo = hypo + " " + segment.text + + raw_truth = truth + raw_hypo = hypo + + for x in punctuation_all: + truth = truth.replace(x, "") + hypo = hypo.replace(x, "") + + truth = truth.replace(" ", " ") + hypo = hypo.replace(" ", " ") + + if lang == "zh": + truth = " ".join([x for x in truth]) + hypo = " ".join([x for x in hypo]) + elif lang == "en": + truth = truth.lower() + hypo = hypo.lower() + + measures = compute_measures(truth, hypo) + wer = measures["wer"] + + # ref_list = truth.split(" ") + # subs = measures["substitutions"] / len(ref_list) + # dele = measures["deletions"] / len(ref_list) + # inse = measures["insertions"] / len(ref_list) + + wer_results.append( + { + "wav": Path(gen_wav).stem, + "truth": raw_truth, + "hypo": raw_hypo, + "wer": wer, + } + ) + + return wer_results + + +# SIM Evaluation + + +def run_sim(args): + rank, test_set, ckpt_dir = args + device = f"cuda:{rank}" + + model = ECAPA_TDNN_SMALL(feat_dim=1024, feat_type="wavlm_large", config_path=None) + state_dict = torch.load(ckpt_dir, weights_only=True, map_location=lambda storage, loc: storage) + model.load_state_dict(state_dict["model"], strict=False) + + use_gpu = True if torch.cuda.is_available() else False + if use_gpu: + model = model.cuda(device) + model.eval() + + sim_results = [] + for gen_wav, prompt_wav, truth in tqdm(test_set): + wav1, sr1 = torchaudio.load(gen_wav) + wav2, sr2 = torchaudio.load(prompt_wav) + + resample1 = torchaudio.transforms.Resample(orig_freq=sr1, new_freq=16000) + resample2 = torchaudio.transforms.Resample(orig_freq=sr2, new_freq=16000) + wav1 = resample1(wav1) + wav2 = resample2(wav2) + + if use_gpu: + wav1 = wav1.cuda(device) + wav2 = wav2.cuda(device) + with torch.no_grad(): + emb1 = model(wav1) + emb2 = model(wav2) + + sim = F.cosine_similarity(emb1, emb2)[0].item() + # print(f"VSim score between two audios: {sim:.4f} (-1.0, 1.0).") + sim_results.append( + { + "wav": 
Path(gen_wav).stem, + "sim": sim, + } + ) + + return sim_results diff --git a/src/f5_tts/infer/README.md b/src/f5_tts/infer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..afcc1fc4bf5790f5ba3e4471b575c9924f8d2a63 --- /dev/null +++ b/src/f5_tts/infer/README.md @@ -0,0 +1,154 @@ +# Inference + +The pretrained model checkpoints can be reached at [🤗 Hugging Face](https://huggingface.co/SWivid/F5-TTS) and [🤖 Model Scope](https://www.modelscope.cn/models/SWivid/F5-TTS_Emilia-ZH-EN), or will be automatically downloaded when running inference scripts. + +**More checkpoints with whole community efforts can be found in [SHARED.md](SHARED.md), supporting more languages.** + +Currently support **30s for a single** generation, which is the **total length** including both prompt and output audio. However, you can provide `infer_cli` and `infer_gradio` with longer text, will automatically do chunk generation. Long reference audio will be **clip short to ~15s**. + +To avoid possible inference failures, make sure you have seen through the following instructions. + +- Use reference audio <15s and leave some silence (e.g. 1s) at the end. Otherwise there is a risk of truncating in the middle of word, leading to suboptimal generation. +- Uppercased letters will be uttered letter by letter, so use lowercased letters for normal words. +- Add some spaces (blank: " ") or punctuations (e.g. "," ".") to explicitly introduce some pauses. +- Preprocess numbers to Chinese letters if you want to have them read in Chinese, otherwise in English. +- If the generation output is blank (pure silence), check for ffmpeg installation (various tutorials online, blogs, videos, etc.). +- Try turn off use_ema if using an early-stage finetuned checkpoint (which goes just few updates). + + +## Gradio App + +Currently supported features: + +- Basic TTS with Chunk Inference +- Multi-Style / Multi-Speaker Generation +- Voice Chat powered by Qwen2.5-3B-Instruct +- [Custom inference with more language support](src/f5_tts/infer/SHARED.md) + +The cli command `f5-tts_infer-gradio` equals to `python src/f5_tts/infer/infer_gradio.py`, which launches a Gradio APP (web interface) for inference. + +The script will load model checkpoints from Huggingface. You can also manually download files and update the path to `load_model()` in `infer_gradio.py`. Currently only load TTS models first, will load ASR model to do transcription if `ref_text` not provided, will load LLM model if use Voice Chat. + +More flags options: + +```bash +# Automatically launch the interface in the default web browser +f5-tts_infer-gradio --inbrowser + +# Set the root path of the application, if it's not served from the root ("/") of the domain +# For example, if the application is served at "https://example.com/myapp" +f5-tts_infer-gradio --root_path "/myapp" +``` + +Could also be used as a component for larger application: +```python +import gradio as gr +from f5_tts.infer.infer_gradio import app + +with gr.Blocks() as main_app: + gr.Markdown("# This is an example of using F5-TTS within a bigger Gradio app") + + # ... other Gradio components + + app.render() + +main_app.launch() +``` + + +## CLI Inference + +The cli command `f5-tts_infer-cli` equals to `python src/f5_tts/infer/infer_cli.py`, which is a command line tool for inference. + +The script will load model checkpoints from Huggingface. You can also manually download files and use `--ckpt_file` to specify the model you want to load, or directly update in `infer_cli.py`. 
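+
+For example, a minimal sketch of fetching the released checkpoint and vocab manually with `huggingface_hub` (file names taken from the shared model card; adjust to the model you actually want):
+
+```python
+# illustrative only: download F5TTS_v1_Base files locally, then point --ckpt_file / --vocab_file at the printed paths
+from huggingface_hub import hf_hub_download
+
+ckpt_path = hf_hub_download(repo_id="SWivid/F5-TTS", filename="F5TTS_v1_Base/model_1250000.safetensors")
+vocab_path = hf_hub_download(repo_id="SWivid/F5-TTS", filename="F5TTS_v1_Base/vocab.txt")
+print(ckpt_path, vocab_path)
+```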
+ +For change vocab.txt use `--vocab_file` to provide your `vocab.txt` file. + +Basically you can inference with flags: +```bash +# Leave --ref_text "" will have ASR model transcribe (extra GPU memory usage) +f5-tts_infer-cli \ +--model F5TTS_v1_Base \ +--ref_audio "ref_audio.wav" \ +--ref_text "The content, subtitle or transcription of reference audio." \ +--gen_text "Some text you want TTS model generate for you." + +# Use BigVGAN as vocoder. Currently only support F5TTS_Base. +f5-tts_infer-cli --model F5TTS_Base --vocoder_name bigvgan --load_vocoder_from_local + +# Use custom path checkpoint, e.g. +f5-tts_infer-cli --ckpt_file ckpts/F5TTS_v1_Base/model_1250000.safetensors + +# More instructions +f5-tts_infer-cli --help +``` + +And a `.toml` file would help with more flexible usage. + +```bash +f5-tts_infer-cli -c custom.toml +``` + +For example, you can use `.toml` to pass in variables, refer to `src/f5_tts/infer/examples/basic/basic.toml`: + +```toml +# F5TTS_v1_Base | E2TTS_Base +model = "F5TTS_v1_Base" +ref_audio = "infer/examples/basic/basic_ref_en.wav" +# If an empty "", transcribes the reference audio automatically. +ref_text = "Some call me nature, others call me mother nature." +gen_text = "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring." +# File with text to generate. Ignores the text above. +gen_file = "" +remove_silence = false +output_dir = "tests" +``` + +You can also leverage `.toml` file to do multi-style generation, refer to `src/f5_tts/infer/examples/multi/story.toml`. + +```toml +# F5TTS_v1_Base | E2TTS_Base +model = "F5TTS_v1_Base" +ref_audio = "infer/examples/multi/main.flac" +# If an empty "", transcribes the reference audio automatically. +ref_text = "" +gen_text = "" +# File with text to generate. Ignores the text above. +gen_file = "infer/examples/multi/story.txt" +remove_silence = true +output_dir = "tests" + +[voices.town] +ref_audio = "infer/examples/multi/town.flac" +ref_text = "" + +[voices.country] +ref_audio = "infer/examples/multi/country.flac" +ref_text = "" +``` +You should mark the voice with `[main]` `[town]` `[country]` whenever you want to change voice, refer to `src/f5_tts/infer/examples/multi/story.txt`. + +## Socket Real-time Service + +Real-time voice output with chunk stream: + +```bash +# Start socket server +python src/f5_tts/socket_server.py + +# If PyAudio not installed +sudo apt-get install portaudio19-dev +pip install pyaudio + +# Communicate with socket client +python src/f5_tts/socket_client.py +``` + +## Speech Editing + +To test speech editing capabilities, use the following command: + +```bash +python src/f5_tts/infer/speech_edit.py +``` + diff --git a/src/f5_tts/infer/SHARED.md b/src/f5_tts/infer/SHARED.md new file mode 100644 index 0000000000000000000000000000000000000000..79d7f56e22d64fcf2161efeac8a5ab7cdc007ea2 --- /dev/null +++ b/src/f5_tts/infer/SHARED.md @@ -0,0 +1,174 @@ + +# Shared Model Cards + + +### **Prerequisites of using** +- This document is serving as a quick lookup table for the community training/finetuning result, with various language support. +- The models in this repository are open source and are based on voluntary contributions from contributors. +- The use of models must be conditioned on respect for the respective creators. The convenience brought comes from their efforts. 
+ + +### **Welcome to share here** +- Have a pretrained/finetuned result: model checkpoint (pruned best to facilitate inference, i.e. leave only `ema_model_state_dict`) and corresponding vocab file (for tokenization). +- Host a public [huggingface model repository](https://huggingface.co/new) and upload the model related files. +- Make a pull request adding a model card to the current page, i.e. `src\f5_tts\infer\SHARED.md`. + + +### Supported Languages +- [Multilingual](#multilingual) + - [F5-TTS v1 v0 Base @ zh \& en @ F5-TTS](#f5-tts-v1-v0-base--zh--en--f5-tts) +- [English](#english) +- [Finnish](#finnish) + - [F5-TTS Base @ fi @ AsmoKoskinen](#f5-tts-base--fi--asmokoskinen) +- [French](#french) + - [F5-TTS Base @ fr @ RASPIAUDIO](#f5-tts-base--fr--raspiaudio) +- [Hindi](#hindi) + - [F5-TTS Small @ hi @ SPRINGLab](#f5-tts-small--hi--springlab) +- [Italian](#italian) + - [F5-TTS Base @ it @ alien79](#f5-tts-base--it--alien79) +- [Japanese](#japanese) + - [F5-TTS Base @ ja @ Jmica](#f5-tts-base--ja--jmica) +- [Mandarin](#mandarin) +- [Russian](#russian) + - [F5-TTS Base @ ru @ HotDro4illa](#f5-tts-base--ru--hotdro4illa) +- [Spanish](#spanish) + - [F5-TTS Base @ es @ jpgallegoar](#f5-tts-base--es--jpgallegoar) + + +## Multilingual + +#### F5-TTS v1 v0 Base @ zh & en @ F5-TTS +|Model|🤗Hugging Face|Data (Hours)|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS v1 Base|[ckpt & vocab](https://huggingface.co/SWivid/F5-TTS/tree/main/F5TTS_v1_Base)|[Emilia 95K zh&en](https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07)|cc-by-nc-4.0| + +```bash +Model: hf://SWivid/F5-TTS/F5TTS_v1_Base/model_1250000.safetensors +Vocab: hf://SWivid/F5-TTS/F5TTS_v1_Base/vocab.txt +Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "conv_layers": 4} +``` + +|Model|🤗Hugging Face|Data (Hours)|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Base|[ckpt & vocab](https://huggingface.co/SWivid/F5-TTS/tree/main/F5TTS_Base)|[Emilia 95K zh&en](https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07)|cc-by-nc-4.0| + +```bash +Model: hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.safetensors +Vocab: hf://SWivid/F5-TTS/F5TTS_Base/vocab.txt +Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1} +``` + +*Other infos, e.g. Author info, Github repo, Link to some sampled results, Usage instruction, Tutorial (Blog, Video, etc.) 
...* + + +## English + + +## Finnish + +#### F5-TTS Base @ fi @ AsmoKoskinen +|Model|🤗Hugging Face|Data|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Base|[ckpt & vocab](https://huggingface.co/AsmoKoskinen/F5-TTS_Finnish_Model)|[Common Voice](https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0), [Vox Populi](https://huggingface.co/datasets/facebook/voxpopuli)|cc-by-nc-4.0| + +```bash +Model: hf://AsmoKoskinen/F5-TTS_Finnish_Model/model_common_voice_fi_vox_populi_fi_20241206.safetensors +Vocab: hf://AsmoKoskinen/F5-TTS_Finnish_Model/vocab.txt +Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1} +``` + + +## French + +#### F5-TTS Base @ fr @ RASPIAUDIO +|Model|🤗Hugging Face|Data (Hours)|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Base|[ckpt & vocab](https://huggingface.co/RASPIAUDIO/F5-French-MixedSpeakers-reduced)|[LibriVox](https://librivox.org/)|cc-by-nc-4.0| + +```bash +Model: hf://RASPIAUDIO/F5-French-MixedSpeakers-reduced/model_last_reduced.pt +Vocab: hf://RASPIAUDIO/F5-French-MixedSpeakers-reduced/vocab.txt +Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1} +``` + +- [Online Inference with Hugging Face Space](https://huggingface.co/spaces/RASPIAUDIO/f5-tts_french). +- [Tutorial video to train a new language model](https://www.youtube.com/watch?v=UO4usaOojys). +- [Discussion about this training can be found here](https://github.com/SWivid/F5-TTS/issues/434). + + +## Hindi + +#### F5-TTS Small @ hi @ SPRINGLab +|Model|🤗Hugging Face|Data (Hours)|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Small|[ckpt & vocab](https://huggingface.co/SPRINGLab/F5-Hindi-24KHz)|[IndicTTS Hi](https://huggingface.co/datasets/SPRINGLab/IndicTTS-Hindi) & [IndicVoices-R Hi](https://huggingface.co/datasets/SPRINGLab/IndicVoices-R_Hindi) |cc-by-4.0| + +```bash +Model: hf://SPRINGLab/F5-Hindi-24KHz/model_2500000.safetensors +Vocab: hf://SPRINGLab/F5-Hindi-24KHz/vocab.txt +Config: {"dim": 768, "depth": 18, "heads": 12, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1} +``` + +- Authors: SPRING Lab, Indian Institute of Technology, Madras +- Website: https://asr.iitm.ac.in/ + + +## Italian + +#### F5-TTS Base @ it @ alien79 +|Model|🤗Hugging Face|Data|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Base|[ckpt & vocab](https://huggingface.co/alien79/F5-TTS-italian)|[ylacombe/cml-tts](https://huggingface.co/datasets/ylacombe/cml-tts) |cc-by-nc-4.0| + +```bash +Model: hf://alien79/F5-TTS-italian/model_159600.safetensors +Vocab: hf://alien79/F5-TTS-italian/vocab.txt +Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1} +``` + +- Trained by [Mithril Man](https://github.com/MithrilMan) +- Model details on [hf project home](https://huggingface.co/alien79/F5-TTS-italian) +- Open to collaborations to further improve the model + + +## Japanese + +#### F5-TTS Base @ ja @ Jmica +|Model|🤗Hugging Face|Data (Hours)|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Base|[ckpt & vocab](https://huggingface.co/Jmica/F5TTS/tree/main/JA_25498980)|[Emilia 1.7k JA](https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07) & [Galgame Dataset 
5.4k](https://huggingface.co/datasets/OOPPEENN/Galgame_Dataset)|cc-by-nc-4.0| + +```bash +Model: hf://Jmica/F5TTS/JA_25498980/model_25498980.pt +Vocab: hf://Jmica/F5TTS/JA_25498980/vocab_updated.txt +Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1} +``` + + +## Mandarin + + +## Russian + +#### F5-TTS Base @ ru @ HotDro4illa +|Model|🤗Hugging Face|Data (Hours)|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Base|[ckpt & vocab](https://huggingface.co/hotstone228/F5-TTS-Russian)|[Common voice](https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0)|cc-by-nc-4.0| + +```bash +Model: hf://hotstone228/F5-TTS-Russian/model_last.safetensors +Vocab: hf://hotstone228/F5-TTS-Russian/vocab.txt +Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1} +``` +- Finetuned by [HotDro4illa](https://github.com/HotDro4illa) +- Any improvements are welcome + + +## Spanish + +#### F5-TTS Base @ es @ jpgallegoar +|Model|🤗Hugging Face|Data (Hours)|Model License| +|:---:|:------------:|:-----------:|:-------------:| +|F5-TTS Base|[ckpt & vocab](https://huggingface.co/jpgallegoar/F5-Spanish)|[Voxpopuli](https://huggingface.co/datasets/facebook/voxpopuli) & Crowdsourced & TEDx, 218 hours|cc0-1.0| + +- @jpgallegoar [GitHub repo](https://github.com/jpgallegoar/Spanish-F5), Jupyter Notebook and Gradio usage for Spanish model. diff --git a/src/f5_tts/infer/__pycache__/infer_cli.cpython-310.pyc b/src/f5_tts/infer/__pycache__/infer_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc51877c581cf39343122c71364d0a5d8ba6755e Binary files /dev/null and b/src/f5_tts/infer/__pycache__/infer_cli.cpython-310.pyc differ diff --git a/src/f5_tts/infer/__pycache__/utils_infer.cpython-310.pyc b/src/f5_tts/infer/__pycache__/utils_infer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b4671d3c3f3b126f9345c54638cf984f7939ed5 Binary files /dev/null and b/src/f5_tts/infer/__pycache__/utils_infer.cpython-310.pyc differ diff --git a/src/f5_tts/infer/examples/basic/basic.toml b/src/f5_tts/infer/examples/basic/basic.toml new file mode 100644 index 0000000000000000000000000000000000000000..bc3ebb4e3f96368f5d5e7d5ad347a7c852f6f1cb --- /dev/null +++ b/src/f5_tts/infer/examples/basic/basic.toml @@ -0,0 +1,11 @@ +# F5TTS_v1_Base | E2TTS_Base +model = "F5TTS_v1_Base" +ref_audio = "infer/examples/basic/basic_ref_en.wav" +# If an empty "", transcribes the reference audio automatically. +ref_text = "Some call me nature, others call me mother nature." +gen_text = "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring." +# File with text to generate. Ignores the text above. +gen_file = "" +remove_silence = false +output_dir = "tests" +output_file = "infer_cli_basic.wav" diff --git a/src/f5_tts/infer/examples/multi/story.toml b/src/f5_tts/infer/examples/multi/story.toml new file mode 100644 index 0000000000000000000000000000000000000000..f073c26d7335b5ad4ce33b41e18bfa46c87914c8 --- /dev/null +++ b/src/f5_tts/infer/examples/multi/story.toml @@ -0,0 +1,20 @@ +# F5TTS_v1_Base | E2TTS_Base +model = "F5TTS_v1_Base" +ref_audio = "infer/examples/multi/main.flac" +# If an empty "", transcribes the reference audio automatically. 
+ref_text = "" +gen_text = "" +# File with text to generate. Ignores the text above. +gen_file = "infer/examples/multi/story.txt" +remove_silence = true +output_dir = "tests" +output_file = "infer_cli_story.wav" + +[voices.town] +ref_audio = "infer/examples/multi/town.flac" +ref_text = "" + +[voices.country] +ref_audio = "infer/examples/multi/country.flac" +ref_text = "" + diff --git a/src/f5_tts/infer/examples/multi/story.txt b/src/f5_tts/infer/examples/multi/story.txt new file mode 100644 index 0000000000000000000000000000000000000000..bda1f2ba1b967d2e63fdaac3b987fcb54574d76f --- /dev/null +++ b/src/f5_tts/infer/examples/multi/story.txt @@ -0,0 +1 @@ +A Town Mouse and a Country Mouse were acquaintances, and the Country Mouse one day invited his friend to come and see him at his home in the fields. The Town Mouse came, and they sat down to a dinner of barleycorns and roots, the latter of which had a distinctly earthy flavour. The fare was not much to the taste of the guest, and presently he broke out with [town] “My poor dear friend, you live here no better than the ants. Now, you should just see how I fare! My larder is a regular horn of plenty. You must come and stay with me, and I promise you you shall live on the fat of the land.” [main] So when he returned to town he took the Country Mouse with him, and showed him into a larder containing flour and oatmeal and figs and honey and dates. The Country Mouse had never seen anything like it, and sat down to enjoy the luxuries his friend provided: but before they had well begun, the door of the larder opened and someone came in. The two Mice scampered off and hid themselves in a narrow and exceedingly uncomfortable hole. Presently, when all was quiet, they ventured out again; but someone else came in, and off they scuttled again. This was too much for the visitor. [country] “Goodbye,” [main] said he, [country] “I’m off. You live in the lap of luxury, I can see, but you are surrounded by dangers; whereas at home I can enjoy my simple dinner of roots and corn in peace.” \ No newline at end of file diff --git a/src/f5_tts/infer/examples/vocab.txt b/src/f5_tts/infer/examples/vocab.txt new file mode 100644 index 0000000000000000000000000000000000000000..a30a90c12e1ab38b95c97770d5c5cd1d03c392e2 --- /dev/null +++ b/src/f5_tts/infer/examples/vocab.txt @@ -0,0 +1,2545 @@ + +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; += +> +? 
+@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +_ +a +a1 +ai1 +ai2 +ai3 +ai4 +an1 +an3 +an4 +ang1 +ang2 +ang4 +ao1 +ao2 +ao3 +ao4 +b +ba +ba1 +ba2 +ba3 +ba4 +bai1 +bai2 +bai3 +bai4 +ban1 +ban2 +ban3 +ban4 +bang1 +bang2 +bang3 +bang4 +bao1 +bao2 +bao3 +bao4 +bei +bei1 +bei2 +bei3 +bei4 +ben1 +ben2 +ben3 +ben4 +beng +beng1 +beng2 +beng3 +beng4 +bi1 +bi2 +bi3 +bi4 +bian1 +bian2 +bian3 +bian4 +biao1 +biao2 +biao3 +bie1 +bie2 +bie3 +bie4 +bin1 +bin4 +bing1 +bing2 +bing3 +bing4 +bo +bo1 +bo2 +bo3 +bo4 +bu2 +bu3 +bu4 +c +ca1 +cai1 +cai2 +cai3 +cai4 +can1 +can2 +can3 +can4 +cang1 +cang2 +cao1 +cao2 +cao3 +ce4 +cen1 +cen2 +ceng1 +ceng2 +ceng4 +cha1 +cha2 +cha3 +cha4 +chai1 +chai2 +chan1 +chan2 +chan3 +chan4 +chang1 +chang2 +chang3 +chang4 +chao1 +chao2 +chao3 +che1 +che2 +che3 +che4 +chen1 +chen2 +chen3 +chen4 +cheng1 +cheng2 +cheng3 +cheng4 +chi1 +chi2 +chi3 +chi4 +chong1 +chong2 +chong3 +chong4 +chou1 +chou2 +chou3 +chou4 +chu1 +chu2 +chu3 +chu4 +chua1 +chuai1 +chuai2 +chuai3 +chuai4 +chuan1 +chuan2 +chuan3 +chuan4 +chuang1 +chuang2 +chuang3 +chuang4 +chui1 +chui2 +chun1 +chun2 +chun3 +chuo1 +chuo4 +ci1 +ci2 +ci3 +ci4 +cong1 +cong2 +cou4 +cu1 +cu4 +cuan1 +cuan2 +cuan4 +cui1 +cui3 +cui4 +cun1 +cun2 +cun4 +cuo1 +cuo2 +cuo4 +d +da +da1 +da2 +da3 +da4 +dai1 +dai2 +dai3 +dai4 +dan1 +dan2 +dan3 +dan4 +dang1 +dang2 +dang3 +dang4 +dao1 +dao2 +dao3 +dao4 +de +de1 +de2 +dei3 +den4 +deng1 +deng2 +deng3 +deng4 +di1 +di2 +di3 +di4 +dia3 +dian1 +dian2 +dian3 +dian4 +diao1 +diao3 +diao4 +die1 +die2 +die4 +ding1 +ding2 +ding3 +ding4 +diu1 +dong1 +dong3 +dong4 +dou1 +dou2 +dou3 +dou4 +du1 +du2 +du3 +du4 +duan1 +duan2 +duan3 +duan4 +dui1 +dui4 +dun1 +dun3 +dun4 +duo1 +duo2 +duo3 +duo4 +e +e1 +e2 +e3 +e4 +ei2 +en1 +en4 +er +er2 +er3 +er4 +f +fa1 +fa2 +fa3 +fa4 +fan1 +fan2 +fan3 +fan4 +fang1 +fang2 +fang3 +fang4 +fei1 +fei2 +fei3 +fei4 +fen1 +fen2 +fen3 +fen4 +feng1 +feng2 +feng3 +feng4 +fo2 +fou2 +fou3 +fu1 +fu2 +fu3 +fu4 +g +ga1 +ga2 +ga3 +ga4 +gai1 +gai2 +gai3 +gai4 +gan1 +gan2 +gan3 +gan4 +gang1 +gang2 +gang3 +gang4 +gao1 +gao2 +gao3 +gao4 +ge1 +ge2 +ge3 +ge4 +gei2 +gei3 +gen1 +gen2 +gen3 +gen4 +geng1 +geng3 +geng4 +gong1 +gong3 +gong4 +gou1 +gou2 +gou3 +gou4 +gu +gu1 +gu2 +gu3 +gu4 +gua1 +gua2 +gua3 +gua4 +guai1 +guai2 +guai3 +guai4 +guan1 +guan2 +guan3 +guan4 +guang1 +guang2 +guang3 +guang4 +gui1 +gui2 +gui3 +gui4 +gun3 +gun4 +guo1 +guo2 +guo3 +guo4 +h +ha1 +ha2 +ha3 +hai1 +hai2 +hai3 +hai4 +han1 +han2 +han3 +han4 +hang1 +hang2 +hang4 +hao1 +hao2 +hao3 +hao4 +he1 +he2 +he4 +hei1 +hen2 +hen3 +hen4 +heng1 +heng2 +heng4 +hong1 +hong2 +hong3 +hong4 +hou1 +hou2 +hou3 +hou4 +hu1 +hu2 +hu3 +hu4 +hua1 +hua2 +hua4 +huai2 +huai4 +huan1 +huan2 +huan3 +huan4 +huang1 +huang2 +huang3 +huang4 +hui1 +hui2 +hui3 +hui4 +hun1 +hun2 +hun4 +huo +huo1 +huo2 +huo3 +huo4 +i +j +ji1 +ji2 +ji3 +ji4 +jia +jia1 +jia2 +jia3 +jia4 +jian1 +jian2 +jian3 +jian4 +jiang1 +jiang2 +jiang3 +jiang4 +jiao1 +jiao2 +jiao3 +jiao4 +jie1 +jie2 +jie3 +jie4 +jin1 +jin2 +jin3 +jin4 +jing1 +jing2 +jing3 +jing4 +jiong3 +jiu1 +jiu2 +jiu3 +jiu4 +ju1 +ju2 +ju3 +ju4 +juan1 +juan2 +juan3 +juan4 +jue1 +jue2 +jue4 +jun1 +jun4 +k +ka1 +ka2 +ka3 +kai1 +kai2 +kai3 +kai4 +kan1 +kan2 +kan3 +kan4 +kang1 +kang2 +kang4 +kao1 +kao2 +kao3 +kao4 +ke1 +ke2 +ke3 +ke4 +ken3 +keng1 +kong1 +kong3 +kong4 +kou1 +kou2 +kou3 +kou4 +ku1 +ku2 +ku3 +ku4 +kua1 +kua3 +kua4 +kuai3 +kuai4 +kuan1 +kuan2 +kuan3 +kuang1 +kuang2 +kuang4 +kui1 +kui2 +kui3 +kui4 +kun1 +kun3 +kun4 +kuo4 +l +la +la1 +la2 +la3 +la4 +lai2 +lai4 +lan2 +lan3 +lan4 +lang1 +lang2 
+lang3 +lang4 +lao1 +lao2 +lao3 +lao4 +le +le1 +le4 +lei +lei1 +lei2 +lei3 +lei4 +leng1 +leng2 +leng3 +leng4 +li +li1 +li2 +li3 +li4 +lia3 +lian2 +lian3 +lian4 +liang2 +liang3 +liang4 +liao1 +liao2 +liao3 +liao4 +lie1 +lie2 +lie3 +lie4 +lin1 +lin2 +lin3 +lin4 +ling2 +ling3 +ling4 +liu1 +liu2 +liu3 +liu4 +long1 +long2 +long3 +long4 +lou1 +lou2 +lou3 +lou4 +lu1 +lu2 +lu3 +lu4 +luan2 +luan3 +luan4 +lun1 +lun2 +lun4 +luo1 +luo2 +luo3 +luo4 +lv2 +lv3 +lv4 +lve3 +lve4 +m +ma +ma1 +ma2 +ma3 +ma4 +mai2 +mai3 +mai4 +man1 +man2 +man3 +man4 +mang2 +mang3 +mao1 +mao2 +mao3 +mao4 +me +mei2 +mei3 +mei4 +men +men1 +men2 +men4 +meng +meng1 +meng2 +meng3 +meng4 +mi1 +mi2 +mi3 +mi4 +mian2 +mian3 +mian4 +miao1 +miao2 +miao3 +miao4 +mie1 +mie4 +min2 +min3 +ming2 +ming3 +ming4 +miu4 +mo1 +mo2 +mo3 +mo4 +mou1 +mou2 +mou3 +mu2 +mu3 +mu4 +n +n2 +na1 +na2 +na3 +na4 +nai2 +nai3 +nai4 +nan1 +nan2 +nan3 +nan4 +nang1 +nang2 +nang3 +nao1 +nao2 +nao3 +nao4 +ne +ne2 +ne4 +nei3 +nei4 +nen4 +neng2 +ni1 +ni2 +ni3 +ni4 +nian1 +nian2 +nian3 +nian4 +niang2 +niang4 +niao2 +niao3 +niao4 +nie1 +nie4 +nin2 +ning2 +ning3 +ning4 +niu1 +niu2 +niu3 +niu4 +nong2 +nong4 +nou4 +nu2 +nu3 +nu4 +nuan3 +nuo2 +nuo4 +nv2 +nv3 +nve4 +o +o1 +o2 +ou1 +ou2 +ou3 +ou4 +p +pa1 +pa2 +pa4 +pai1 +pai2 +pai3 +pai4 +pan1 +pan2 +pan4 +pang1 +pang2 +pang4 +pao1 +pao2 +pao3 +pao4 +pei1 +pei2 +pei4 +pen1 +pen2 +pen4 +peng1 +peng2 +peng3 +peng4 +pi1 +pi2 +pi3 +pi4 +pian1 +pian2 +pian4 +piao1 +piao2 +piao3 +piao4 +pie1 +pie2 +pie3 +pin1 +pin2 +pin3 +pin4 +ping1 +ping2 +po1 +po2 +po3 +po4 +pou1 +pu1 +pu2 +pu3 +pu4 +q +qi1 +qi2 +qi3 +qi4 +qia1 +qia3 +qia4 +qian1 +qian2 +qian3 +qian4 +qiang1 +qiang2 +qiang3 +qiang4 +qiao1 +qiao2 +qiao3 +qiao4 +qie1 +qie2 +qie3 +qie4 +qin1 +qin2 +qin3 +qin4 +qing1 +qing2 +qing3 +qing4 +qiong1 +qiong2 +qiu1 +qiu2 +qiu3 +qu1 +qu2 +qu3 +qu4 +quan1 +quan2 +quan3 +quan4 +que1 +que2 +que4 +qun2 +r +ran2 +ran3 +rang1 +rang2 +rang3 +rang4 +rao2 +rao3 +rao4 +re2 +re3 +re4 +ren2 +ren3 +ren4 +reng1 +reng2 +ri4 +rong1 +rong2 +rong3 +rou2 +rou4 +ru2 +ru3 +ru4 +ruan2 +ruan3 +rui3 +rui4 +run4 +ruo4 +s +sa1 +sa2 +sa3 +sa4 +sai1 +sai4 +san1 +san2 +san3 +san4 +sang1 +sang3 +sang4 +sao1 +sao2 +sao3 +sao4 +se4 +sen1 +seng1 +sha1 +sha2 +sha3 +sha4 +shai1 +shai2 +shai3 +shai4 +shan1 +shan3 +shan4 +shang +shang1 +shang3 +shang4 +shao1 +shao2 +shao3 +shao4 +she1 +she2 +she3 +she4 +shei2 +shen1 +shen2 +shen3 +shen4 +sheng1 +sheng2 +sheng3 +sheng4 +shi +shi1 +shi2 +shi3 +shi4 +shou1 +shou2 +shou3 +shou4 +shu1 +shu2 +shu3 +shu4 +shua1 +shua2 +shua3 +shua4 +shuai1 +shuai3 +shuai4 +shuan1 +shuan4 +shuang1 +shuang3 +shui2 +shui3 +shui4 +shun3 +shun4 +shuo1 +shuo4 +si1 +si2 +si3 +si4 +song1 +song3 +song4 +sou1 +sou3 +sou4 +su1 +su2 +su4 +suan1 +suan4 +sui1 +sui2 +sui3 +sui4 +sun1 +sun3 +suo +suo1 +suo2 +suo3 +t +ta1 +ta2 +ta3 +ta4 +tai1 +tai2 +tai4 +tan1 +tan2 +tan3 +tan4 +tang1 +tang2 +tang3 +tang4 +tao1 +tao2 +tao3 +tao4 +te4 +teng2 +ti1 +ti2 +ti3 +ti4 +tian1 +tian2 +tian3 +tiao1 +tiao2 +tiao3 +tiao4 +tie1 +tie2 +tie3 +tie4 +ting1 +ting2 +ting3 +tong1 +tong2 +tong3 +tong4 +tou +tou1 +tou2 +tou4 +tu1 +tu2 +tu3 +tu4 +tuan1 +tuan2 +tui1 +tui2 +tui3 +tui4 +tun1 +tun2 +tun4 +tuo1 +tuo2 +tuo3 +tuo4 +u +v +w +wa +wa1 +wa2 +wa3 +wa4 +wai1 +wai3 +wai4 +wan1 +wan2 +wan3 +wan4 +wang1 +wang2 +wang3 +wang4 +wei1 +wei2 +wei3 +wei4 +wen1 +wen2 +wen3 +wen4 +weng1 +weng4 +wo1 +wo2 +wo3 +wo4 +wu1 +wu2 +wu3 +wu4 +x +xi1 +xi2 +xi3 +xi4 +xia1 +xia2 +xia4 +xian1 +xian2 +xian3 +xian4 +xiang1 +xiang2 +xiang3 +xiang4 +xiao1 +xiao2 +xiao3 +xiao4 +xie1 +xie2 +xie3 +xie4 +xin1 +xin2 
+xin4 +xing1 +xing2 +xing3 +xing4 +xiong1 +xiong2 +xiu1 +xiu3 +xiu4 +xu +xu1 +xu2 +xu3 +xu4 +xuan1 +xuan2 +xuan3 +xuan4 +xue1 +xue2 +xue3 +xue4 +xun1 +xun2 +xun4 +y +ya +ya1 +ya2 +ya3 +ya4 +yan1 +yan2 +yan3 +yan4 +yang1 +yang2 +yang3 +yang4 +yao1 +yao2 +yao3 +yao4 +ye1 +ye2 +ye3 +ye4 +yi +yi1 +yi2 +yi3 +yi4 +yin1 +yin2 +yin3 +yin4 +ying1 +ying2 +ying3 +ying4 +yo1 +yong1 +yong2 +yong3 +yong4 +you1 +you2 +you3 +you4 +yu1 +yu2 +yu3 +yu4 +yuan1 +yuan2 +yuan3 +yuan4 +yue1 +yue4 +yun1 +yun2 +yun3 +yun4 +z +za1 +za2 +za3 +zai1 +zai3 +zai4 +zan1 +zan2 +zan3 +zan4 +zang1 +zang4 +zao1 +zao2 +zao3 +zao4 +ze2 +ze4 +zei2 +zen3 +zeng1 +zeng4 +zha1 +zha2 +zha3 +zha4 +zhai1 +zhai2 +zhai3 +zhai4 +zhan1 +zhan2 +zhan3 +zhan4 +zhang1 +zhang2 +zhang3 +zhang4 +zhao1 +zhao2 +zhao3 +zhao4 +zhe +zhe1 +zhe2 +zhe3 +zhe4 +zhen1 +zhen2 +zhen3 +zhen4 +zheng1 +zheng2 +zheng3 +zheng4 +zhi1 +zhi2 +zhi3 +zhi4 +zhong1 +zhong2 +zhong3 +zhong4 +zhou1 +zhou2 +zhou3 +zhou4 +zhu1 +zhu2 +zhu3 +zhu4 +zhua1 +zhua2 +zhua3 +zhuai1 +zhuai3 +zhuai4 +zhuan1 +zhuan2 +zhuan3 +zhuan4 +zhuang1 +zhuang4 +zhui1 +zhui4 +zhun1 +zhun2 +zhun3 +zhuo1 +zhuo2 +zi +zi1 +zi2 +zi3 +zi4 +zong1 +zong2 +zong3 +zong4 +zou1 +zou2 +zou3 +zou4 +zu1 +zu2 +zu3 +zuan1 +zuan3 +zuan4 +zui2 +zui3 +zui4 +zun1 +zuo +zuo1 +zuo2 +zuo3 +zuo4 +{ +~ +¡ +¢ +£ +¥ +§ +¨ +© +« +® +¯ +° +± +² +³ +´ +µ +· +¹ +º +» +¼ +½ +¾ +¿ +À +Á + +à +Ä +Å +Æ +Ç +È +É +Ê +Í +Î +Ñ +Ó +Ö +× +Ø +Ú +Ü +Ý +Þ +ß +à +á +â +ã +ä +å +æ +ç +è +é +ê +ë +ì +í +î +ï +ð +ñ +ò +ó +ô +õ +ö +ø +ù +ú +û +ü +ý +Ā +ā +ă +ą +ć +Č +č +Đ +đ +ē +ė +ę +ě +ĝ +ğ +ħ +ī +į +İ +ı +Ł +ł +ń +ņ +ň +ŋ +Ō +ō +ő +œ +ř +Ś +ś +Ş +ş +Š +š +Ť +ť +ũ +ū +ź +Ż +ż +Ž +ž +ơ +ư +ǎ +ǐ +ǒ +ǔ +ǚ +ș +ț +ɑ +ɔ +ɕ +ə +ɛ +ɜ +ɡ +ɣ +ɪ +ɫ +ɴ +ɹ +ɾ +ʃ +ʊ +ʌ +ʒ +ʔ +ʰ +ʷ +ʻ +ʾ +ʿ +ˈ +ː +˙ +˜ +ˢ +́ +̅ +Α +Β +Δ +Ε +Θ +Κ +Λ +Μ +Ξ +Π +Σ +Τ +Φ +Χ +Ψ +Ω +ά +έ +ή +ί +α +β +γ +δ +ε +ζ +η +θ +ι +κ +λ +μ +ν +ξ +ο +π +ρ +ς +σ +τ +υ +φ +χ +ψ +ω +ϊ +ό +ύ +ώ +ϕ +ϵ +Ё +А +Б +В +Г +Д +Е +Ж +З +И +Й +К +Л +М +Н +О +П +Р +С +Т +У +Ф +Х +Ц +Ч +Ш +Щ +Ы +Ь +Э +Ю +Я +а +б +в +г +д +е +ж +з +и +й +к +л +м +н +о +п +р +с +т +у +ф +х +ц +ч +ш +щ +ъ +ы +ь +э +ю +я +ё +і +ְ +ִ +ֵ +ֶ +ַ +ָ +ֹ +ּ +־ +ׁ +א +ב +ג +ד +ה +ו +ז +ח +ט +י +כ +ל +ם +מ +ן +נ +ס +ע +פ +ק +ר +ש +ת +أ +ب +ة +ت +ج +ح +د +ر +ز +س +ص +ط +ع +ق +ك +ل +م +ن +ه +و +ي +َ +ُ +ِ +ْ +ก +ข +ง +จ +ต +ท +น +ป +ย +ร +ว +ส +ห +อ +ฮ +ั +า +ี +ึ +โ +ใ +ไ +่ +้ +์ +ḍ +Ḥ +ḥ +ṁ +ṃ +ṅ +ṇ +Ṛ +ṛ +Ṣ +ṣ +Ṭ +ṭ +ạ +ả +Ấ +ấ +ầ +ậ +ắ +ằ +ẻ +ẽ +ế +ề +ể +ễ +ệ +ị +ọ +ỏ +ố +ồ +ộ +ớ +ờ +ở +ụ +ủ +ứ +ữ +ἀ +ἁ +Ἀ +ἐ +ἔ +ἰ +ἱ +ὀ +ὁ +ὐ +ὲ +ὸ +ᾶ +᾽ +ῆ +ῇ +ῶ +‎ +‑ +‒ +– +— +― +‖ +† +‡ +• +… +‧ +‬ +′ +″ +⁄ +⁡ +⁰ +⁴ +⁵ +⁶ +⁷ +⁸ +⁹ +₁ +₂ +₃ +€ +₱ +₹ +₽ +℃ +ℏ +ℓ +№ +ℝ +™ +⅓ +⅔ +⅛ +→ +∂ +∈ +∑ +− +∗ +√ +∞ +∫ +≈ +≠ +≡ +≤ +≥ +⋅ +⋯ +█ +♪ +⟨ +⟩ +、 +。 +《 +》 +「 +」 +【 +】 +あ +う +え +お +か +が +き +ぎ +く +ぐ +け +げ +こ +ご +さ +し +じ +す +ず +せ +ぜ +そ +ぞ +た +だ +ち +っ +つ +で +と +ど +な +に +ね +の +は +ば +ひ +ぶ +へ +べ +ま +み +む +め +も +ゃ +や +ゆ +ょ +よ +ら +り +る +れ +ろ +わ +を +ん +ァ +ア +ィ +イ +ウ +ェ +エ +オ +カ +ガ +キ +ク +ケ +ゲ +コ +ゴ +サ +ザ +シ +ジ +ス +ズ +セ +ゾ +タ +ダ +チ +ッ +ツ +テ +デ +ト +ド +ナ +ニ +ネ +ノ +バ +パ +ビ +ピ +フ +プ +ヘ +ベ +ペ +ホ +ボ +ポ +マ +ミ +ム +メ +モ +ャ +ヤ +ュ +ユ +ョ +ヨ +ラ +リ +ル +レ +ロ +ワ +ン +・ +ー +ㄋ +ㄍ +ㄎ +ㄏ +ㄓ +ㄕ +ㄚ +ㄜ +ㄟ +ㄤ +ㄥ +ㄧ +ㄱ +ㄴ +ㄷ +ㄹ +ㅁ +ㅂ +ㅅ +ㅈ +ㅍ +ㅎ +ㅏ +ㅓ +ㅗ +ㅜ +ㅡ +ㅣ +㗎 +가 +각 +간 +갈 +감 +갑 +갓 +갔 +강 +같 +개 +거 +건 +걸 +겁 +것 +겉 +게 +겠 +겨 +결 +겼 +경 +계 +고 +곤 +골 +곱 +공 +과 +관 +광 +교 +구 +국 +굴 +귀 +귄 +그 +근 +글 +금 +기 +긴 +길 +까 +깍 +깔 +깜 +깨 +께 +꼬 +꼭 +꽃 +꾸 +꿔 +끔 +끗 +끝 +끼 +나 +난 +날 +남 +납 +내 +냐 +냥 +너 +넘 +넣 +네 +녁 +년 +녕 +노 +녹 +놀 +누 +눈 +느 +는 +늘 +니 +님 +닙 +다 +닥 +단 +달 +닭 +당 +대 +더 +덕 +던 +덥 +데 +도 +독 +동 +돼 +됐 +되 +된 +될 +두 +둑 +둥 +드 +들 +등 
+디 +따 +딱 +딸 +땅 +때 +떤 +떨 +떻 +또 +똑 +뚱 +뛰 +뜻 +띠 +라 +락 +란 +람 +랍 +랑 +래 +랜 +러 +런 +럼 +렇 +레 +려 +력 +렵 +렸 +로 +록 +롬 +루 +르 +른 +를 +름 +릉 +리 +릴 +림 +마 +막 +만 +많 +말 +맑 +맙 +맛 +매 +머 +먹 +멍 +메 +면 +명 +몇 +모 +목 +몸 +못 +무 +문 +물 +뭐 +뭘 +미 +민 +밌 +밑 +바 +박 +밖 +반 +받 +발 +밤 +밥 +방 +배 +백 +밸 +뱀 +버 +번 +벌 +벚 +베 +벼 +벽 +별 +병 +보 +복 +본 +볼 +봐 +봤 +부 +분 +불 +비 +빔 +빛 +빠 +빨 +뼈 +뽀 +뿅 +쁘 +사 +산 +살 +삼 +샀 +상 +새 +색 +생 +서 +선 +설 +섭 +섰 +성 +세 +셔 +션 +셨 +소 +속 +손 +송 +수 +숙 +순 +술 +숫 +숭 +숲 +쉬 +쉽 +스 +슨 +습 +슷 +시 +식 +신 +실 +싫 +심 +십 +싶 +싸 +써 +쓰 +쓴 +씌 +씨 +씩 +씬 +아 +악 +안 +않 +알 +야 +약 +얀 +양 +얘 +어 +언 +얼 +엄 +업 +없 +었 +엉 +에 +여 +역 +연 +염 +엽 +영 +옆 +예 +옛 +오 +온 +올 +옷 +옹 +와 +왔 +왜 +요 +욕 +용 +우 +운 +울 +웃 +워 +원 +월 +웠 +위 +윙 +유 +육 +윤 +으 +은 +을 +음 +응 +의 +이 +익 +인 +일 +읽 +임 +입 +있 +자 +작 +잔 +잖 +잘 +잡 +잤 +장 +재 +저 +전 +점 +정 +제 +져 +졌 +조 +족 +좀 +종 +좋 +죠 +주 +준 +줄 +중 +줘 +즈 +즐 +즘 +지 +진 +집 +짜 +짝 +쩌 +쪼 +쪽 +쫌 +쭈 +쯔 +찌 +찍 +차 +착 +찾 +책 +처 +천 +철 +체 +쳐 +쳤 +초 +촌 +추 +출 +춤 +춥 +춰 +치 +친 +칠 +침 +칩 +칼 +커 +켓 +코 +콩 +쿠 +퀴 +크 +큰 +큽 +키 +킨 +타 +태 +터 +턴 +털 +테 +토 +통 +투 +트 +특 +튼 +틀 +티 +팀 +파 +팔 +패 +페 +펜 +펭 +평 +포 +폭 +표 +품 +풍 +프 +플 +피 +필 +하 +학 +한 +할 +함 +합 +항 +해 +햇 +했 +행 +허 +험 +형 +혜 +호 +혼 +홀 +화 +회 +획 +후 +휴 +흐 +흔 +희 +히 +힘 +ﷺ +ﷻ +! +, +? +� +𠮶 diff --git a/src/f5_tts/infer/infer_cli.py b/src/f5_tts/infer/infer_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..5c7a1bbc3be16ae2f2a3ceb476b5a1551bf14d14 --- /dev/null +++ b/src/f5_tts/infer/infer_cli.py @@ -0,0 +1,355 @@ +import argparse +import codecs +import os +import re +from datetime import datetime +from importlib.resources import files +from pathlib import Path + +import numpy as np +import soundfile as sf +import tomli +from cached_path import cached_path +from omegaconf import OmegaConf + +from f5_tts.infer.utils_infer import ( + mel_spec_type, + target_rms, + cross_fade_duration, + nfe_step, + cfg_strength, + sway_sampling_coef, + speed, + fix_duration, + infer_process, + load_model, + load_vocoder, + preprocess_ref_audio_text, + remove_silence_for_generated_wav, +) +from f5_tts.model import DiT, UNetT # noqa: F401. used for config + + +parser = argparse.ArgumentParser( + prog="python3 infer-cli.py", + description="Commandline interface for E2/F5 TTS with Advanced Batch Processing.", + epilog="Specify options above to override one or more settings from config.", +) +parser.add_argument( + "-c", + "--config", + type=str, + default=os.path.join(files("f5_tts").joinpath("infer/examples/basic"), "basic.toml"), + help="The configuration file, default see infer/examples/basic/basic.toml", +) + + +# Note. 
Not to provide default value here in order to read default from config file + +parser.add_argument( + "-m", + "--model", + type=str, + help="The model name: F5TTS_v1_Base | F5TTS_Base | E2TTS_Base | etc.", +) +parser.add_argument( + "-mc", + "--model_cfg", + type=str, + help="The path to F5-TTS model config file .yaml", +) +parser.add_argument( + "-p", + "--ckpt_file", + type=str, + help="The path to model checkpoint .pt, leave blank to use default", +) +parser.add_argument( + "-v", + "--vocab_file", + type=str, + help="The path to vocab file .txt, leave blank to use default", +) +parser.add_argument( + "-r", + "--ref_audio", + type=str, + help="The reference audio file.", +) +parser.add_argument( + "-s", + "--ref_text", + type=str, + help="The transcript/subtitle for the reference audio", +) +parser.add_argument( + "-t", + "--gen_text", + type=str, + help="The text to make model synthesize a speech", +) +parser.add_argument( + "-f", + "--gen_file", + type=str, + help="The file with text to generate, will ignore --gen_text", +) +parser.add_argument( + "-o", + "--output_dir", + type=str, + help="The path to output folder", +) +parser.add_argument( + "-w", + "--output_file", + type=str, + help="The name of output file", +) +parser.add_argument( + "--save_chunk", + action="store_true", + help="To save each audio chunks during inference", +) +parser.add_argument( + "--remove_silence", + action="store_true", + help="To remove long silence found in ouput", +) +parser.add_argument( + "--load_vocoder_from_local", + action="store_true", + help="To load vocoder from local dir, default to ../checkpoints/vocos-mel-24khz", +) +parser.add_argument( + "--vocoder_name", + type=str, + choices=["vocos", "bigvgan"], + help=f"Used vocoder name: vocos | bigvgan, default {mel_spec_type}", +) +parser.add_argument( + "--target_rms", + type=float, + help=f"Target output speech loudness normalization value, default {target_rms}", +) +parser.add_argument( + "--cross_fade_duration", + type=float, + help=f"Duration of cross-fade between audio segments in seconds, default {cross_fade_duration}", +) +parser.add_argument( + "--nfe_step", + type=int, + help=f"The number of function evaluation (denoising steps), default {nfe_step}", +) +parser.add_argument( + "--cfg_strength", + type=float, + help=f"Classifier-free guidance strength, default {cfg_strength}", +) +parser.add_argument( + "--sway_sampling_coef", + type=float, + help=f"Sway Sampling coefficient, default {sway_sampling_coef}", +) +parser.add_argument( + "--speed", + type=float, + help=f"The speed of the generated audio, default {speed}", +) +parser.add_argument( + "--fix_duration", + type=float, + help=f"Fix the total duration (ref and gen audios) in seconds, default {fix_duration}", +) +args = parser.parse_args() + + +# config file + +config = tomli.load(open(args.config, "rb")) + + +# command-line interface parameters + +model = args.model or config.get("model", "F5TTS_v1_Base") +ckpt_file = args.ckpt_file or config.get("ckpt_file", "") +vocab_file = args.vocab_file or config.get("vocab_file", "") + +ref_audio = args.ref_audio or config.get("ref_audio", "infer/examples/basic/basic_ref_en.wav") +ref_text = ( + args.ref_text + if args.ref_text is not None + else config.get("ref_text", "Some call me nature, others call me mother nature.") +) +gen_text = args.gen_text or config.get("gen_text", "Here we generate something just for test.") +gen_file = args.gen_file or config.get("gen_file", "") + +output_dir = args.output_dir or config.get("output_dir", "tests") 
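+# each setting resolves as: explicit CLI flag > value in the TOML config > built-in default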
+output_file = args.output_file or config.get( + "output_file", f"infer_cli_{datetime.now().strftime(r'%Y%m%d_%H%M%S')}.wav" +) + +save_chunk = args.save_chunk or config.get("save_chunk", False) +remove_silence = args.remove_silence or config.get("remove_silence", False) +load_vocoder_from_local = args.load_vocoder_from_local or config.get("load_vocoder_from_local", False) + +vocoder_name = args.vocoder_name or config.get("vocoder_name", mel_spec_type) +target_rms = args.target_rms or config.get("target_rms", target_rms) +cross_fade_duration = args.cross_fade_duration or config.get("cross_fade_duration", cross_fade_duration) +nfe_step = args.nfe_step or config.get("nfe_step", nfe_step) +cfg_strength = args.cfg_strength or config.get("cfg_strength", cfg_strength) +sway_sampling_coef = args.sway_sampling_coef or config.get("sway_sampling_coef", sway_sampling_coef) +speed = args.speed or config.get("speed", speed) +fix_duration = args.fix_duration or config.get("fix_duration", fix_duration) + + +# patches for pip pkg user +if "infer/examples/" in ref_audio: + ref_audio = str(files("f5_tts").joinpath(f"{ref_audio}")) +if "infer/examples/" in gen_file: + gen_file = str(files("f5_tts").joinpath(f"{gen_file}")) +if "voices" in config: + for voice in config["voices"]: + voice_ref_audio = config["voices"][voice]["ref_audio"] + if "infer/examples/" in voice_ref_audio: + config["voices"][voice]["ref_audio"] = str(files("f5_tts").joinpath(f"{voice_ref_audio}")) + + +# ignore gen_text if gen_file provided + +if gen_file: + gen_text = codecs.open(gen_file, "r", "utf-8").read() + + +# output path + +wave_path = Path(output_dir) / output_file +# spectrogram_path = Path(output_dir) / "infer_cli_out.png" +if save_chunk: + output_chunk_dir = os.path.join(output_dir, f"{Path(output_file).stem}_chunks") + if not os.path.exists(output_chunk_dir): + os.makedirs(output_chunk_dir) + + +# load vocoder + +if vocoder_name == "vocos": + vocoder_local_path = "../checkpoints/vocos-mel-24khz" +elif vocoder_name == "bigvgan": + vocoder_local_path = "../checkpoints/bigvgan_v2_24khz_100band_256x" + +vocoder = load_vocoder(vocoder_name=vocoder_name, is_local=load_vocoder_from_local, local_path=vocoder_local_path) + + +# load TTS model + +model_cfg = OmegaConf.load( + args.model_cfg or config.get("model_cfg", str(files("f5_tts").joinpath(f"configs/{model}.yaml"))) +).model +model_cls = globals()[model_cfg.backbone] + +repo_name, ckpt_step, ckpt_type = "F5-TTS", 1250000, "safetensors" + +if model != "F5TTS_Base": + assert vocoder_name == model_cfg.mel_spec.mel_spec_type + +# override for previous models +if model == "F5TTS_Base": + if vocoder_name == "vocos": + ckpt_step = 1200000 + elif vocoder_name == "bigvgan": + model = "F5TTS_Base_bigvgan" + ckpt_type = "pt" +elif model == "E2TTS_Base": + repo_name = "E2-TTS" + ckpt_step = 1200000 + +if not ckpt_file: + ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{model}/model_{ckpt_step}.{ckpt_type}")) + +print(f"Using {model}...") +ema_model = load_model(model_cls, model_cfg.arch, ckpt_file, mel_spec_type=vocoder_name, vocab_file=vocab_file) + + +# inference process + + +def main(): + main_voice = {"ref_audio": ref_audio, "ref_text": ref_text} + if "voices" not in config: + voices = {"main": main_voice} + else: + voices = config["voices"] + voices["main"] = main_voice + for voice in voices: + print("Voice:", voice) + print("ref_audio ", voices[voice]["ref_audio"]) + voices[voice]["ref_audio"], voices[voice]["ref_text"] = preprocess_ref_audio_text( + voices[voice]["ref_audio"], 
voices[voice]["ref_text"] + ) + print("ref_audio_", voices[voice]["ref_audio"], "\n\n") + + generated_audio_segments = [] + reg1 = r"(?=\[\w+\])" + chunks = re.split(reg1, gen_text) + reg2 = r"\[(\w+)\]" + for text in chunks: + if not text.strip(): + continue + match = re.match(reg2, text) + if match: + voice = match[1] + else: + print("No voice tag found, using main.") + voice = "main" + if voice not in voices: + print(f"Voice {voice} not found, using main.") + voice = "main" + text = re.sub(reg2, "", text) + ref_audio_ = voices[voice]["ref_audio"] + ref_text_ = voices[voice]["ref_text"] + gen_text_ = text.strip() + print(f"Voice: {voice}") + audio_segment, final_sample_rate, spectragram = infer_process( + ref_audio_, + ref_text_, + gen_text_, + ema_model, + vocoder, + mel_spec_type=vocoder_name, + target_rms=target_rms, + cross_fade_duration=cross_fade_duration, + nfe_step=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + speed=speed, + fix_duration=fix_duration, + ) + generated_audio_segments.append(audio_segment) + + if save_chunk: + if len(gen_text_) > 200: + gen_text_ = gen_text_[:200] + " ... " + sf.write( + os.path.join(output_chunk_dir, f"{len(generated_audio_segments)-1}_{gen_text_}.wav"), + audio_segment, + final_sample_rate, + ) + + if generated_audio_segments: + final_wave = np.concatenate(generated_audio_segments) + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + with open(wave_path, "wb") as f: + sf.write(f.name, final_wave, final_sample_rate) + # Remove silence + if remove_silence: + remove_silence_for_generated_wav(f.name) + print(f.name) + + +if __name__ == "__main__": + main() diff --git a/src/f5_tts/infer/infer_gradio.py b/src/f5_tts/infer/infer_gradio.py new file mode 100644 index 0000000000000000000000000000000000000000..72202f6b7a6b6233a9d86fde3760be604513c0e4 --- /dev/null +++ b/src/f5_tts/infer/infer_gradio.py @@ -0,0 +1,927 @@ +# ruff: noqa: E402 +# Above allows ruff to ignore E402: module level import not at top of file + +import json +import re +import tempfile +from collections import OrderedDict +from importlib.resources import files + +import click +import gradio as gr +import numpy as np +import soundfile as sf +import torchaudio +from cached_path import cached_path +from transformers import AutoModelForCausalLM, AutoTokenizer + +try: + import spaces + + USING_SPACES = True +except ImportError: + USING_SPACES = False + + +def gpu_decorator(func): + if USING_SPACES: + return spaces.GPU(func) + else: + return func + + +from f5_tts.model import DiT, UNetT +from f5_tts.infer.utils_infer import ( + load_vocoder, + load_model, + preprocess_ref_audio_text, + infer_process, + remove_silence_for_generated_wav, + save_spectrogram, +) + + +DEFAULT_TTS_MODEL = "F5-TTS_v1" +tts_model_choice = DEFAULT_TTS_MODEL + +DEFAULT_TTS_MODEL_CFG = [ + "hf://SWivid/F5-TTS/F5TTS_v1_Base/model_1250000.safetensors", + "hf://SWivid/F5-TTS/F5TTS_v1_Base/vocab.txt", + json.dumps(dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)), +] + + +# load models + +vocoder = load_vocoder() + + +def load_f5tts(): + ckpt_path = str(cached_path(DEFAULT_TTS_MODEL_CFG[0])) + F5TTS_model_cfg = json.loads(DEFAULT_TTS_MODEL_CFG[2]) + return load_model(DiT, F5TTS_model_cfg, ckpt_path) + + +def load_e2tts(): + ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.safetensors")) + E2TTS_model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4, text_mask_padding=False, pe_attn_head=1) + return load_model(UNetT, 
E2TTS_model_cfg, ckpt_path) + + +def load_custom(ckpt_path: str, vocab_path="", model_cfg=None): + ckpt_path, vocab_path = ckpt_path.strip(), vocab_path.strip() + if ckpt_path.startswith("hf://"): + ckpt_path = str(cached_path(ckpt_path)) + if vocab_path.startswith("hf://"): + vocab_path = str(cached_path(vocab_path)) + if model_cfg is None: + model_cfg = json.loads(DEFAULT_TTS_MODEL_CFG[2]) + return load_model(DiT, model_cfg, ckpt_path, vocab_file=vocab_path) + + +F5TTS_ema_model = load_f5tts() +E2TTS_ema_model = load_e2tts() if USING_SPACES else None +custom_ema_model, pre_custom_path = None, "" + +chat_model_state = None +chat_tokenizer_state = None + + +@gpu_decorator +def generate_response(messages, model, tokenizer): + """Generate response using Qwen""" + text = tokenizer.apply_chat_template( + messages, + tokenize=False, + add_generation_prompt=True, + ) + + model_inputs = tokenizer([text], return_tensors="pt").to(model.device) + generated_ids = model.generate( + **model_inputs, + max_new_tokens=512, + temperature=0.7, + top_p=0.95, + ) + + generated_ids = [ + output_ids[len(input_ids) :] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) + ] + return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] + + +@gpu_decorator +def infer( + ref_audio_orig, + ref_text, + gen_text, + model, + remove_silence, + cross_fade_duration=0.15, + nfe_step=32, + speed=1, + show_info=gr.Info, +): + if not ref_audio_orig: + gr.Warning("Please provide reference audio.") + return gr.update(), gr.update(), ref_text + + if not gen_text.strip(): + gr.Warning("Please enter text to generate.") + return gr.update(), gr.update(), ref_text + + ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_orig, ref_text, show_info=show_info) + + if model == DEFAULT_TTS_MODEL: + ema_model = F5TTS_ema_model + elif model == "E2-TTS": + global E2TTS_ema_model + if E2TTS_ema_model is None: + show_info("Loading E2-TTS model...") + E2TTS_ema_model = load_e2tts() + ema_model = E2TTS_ema_model + elif isinstance(model, list) and model[0] == "Custom": + assert not USING_SPACES, "Only official checkpoints allowed in Spaces." 
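+        # model arrives as ["Custom", ckpt_path, vocab_path, model_cfg]; only reload the
+        # custom checkpoint when the path differs from the one loaded last time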
+ global custom_ema_model, pre_custom_path + if pre_custom_path != model[1]: + show_info("Loading Custom TTS model...") + custom_ema_model = load_custom(model[1], vocab_path=model[2], model_cfg=model[3]) + pre_custom_path = model[1] + ema_model = custom_ema_model + + final_wave, final_sample_rate, combined_spectrogram = infer_process( + ref_audio, + ref_text, + gen_text, + ema_model, + vocoder, + cross_fade_duration=cross_fade_duration, + nfe_step=nfe_step, + speed=speed, + show_info=show_info, + progress=gr.Progress(), + ) + + # Remove silence + if remove_silence: + with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f: + sf.write(f.name, final_wave, final_sample_rate) + remove_silence_for_generated_wav(f.name) + final_wave, _ = torchaudio.load(f.name) + final_wave = final_wave.squeeze().cpu().numpy() + + # Save the spectrogram + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_spectrogram: + spectrogram_path = tmp_spectrogram.name + save_spectrogram(combined_spectrogram, spectrogram_path) + + return (final_sample_rate, final_wave), spectrogram_path, ref_text + + +with gr.Blocks() as app_credits: + gr.Markdown(""" +# Credits + +* [mrfakename](https://github.com/fakerybakery) for the original [online demo](https://huggingface.co/spaces/mrfakename/E2-F5-TTS) +* [RootingInLoad](https://github.com/RootingInLoad) for initial chunk generation and podcast app exploration +* [jpgallegoar](https://github.com/jpgallegoar) for multiple speech-type generation & voice chat +""") +with gr.Blocks() as app_tts: + gr.Markdown("# Batched TTS") + ref_audio_input = gr.Audio(label="Reference Audio", type="filepath") + gen_text_input = gr.Textbox(label="Text to Generate", lines=10) + generate_btn = gr.Button("Synthesize", variant="primary") + with gr.Accordion("Advanced Settings", open=False): + ref_text_input = gr.Textbox( + label="Reference Text", + info="Leave blank to automatically transcribe the reference audio. If you enter text it will override automatic transcription.", + lines=2, + ) + remove_silence = gr.Checkbox( + label="Remove Silences", + info="The model tends to produce silences, especially on longer audio. We can manually remove silences if needed. Note that this is an experimental feature and may produce strange results. 
This will also increase generation time.", + value=False, + ) + speed_slider = gr.Slider( + label="Speed", + minimum=0.3, + maximum=2.0, + value=1.0, + step=0.1, + info="Adjust the speed of the audio.", + ) + nfe_slider = gr.Slider( + label="NFE Steps", + minimum=4, + maximum=64, + value=32, + step=2, + info="Set the number of denoising steps.", + ) + cross_fade_duration_slider = gr.Slider( + label="Cross-Fade Duration (s)", + minimum=0.0, + maximum=1.0, + value=0.15, + step=0.01, + info="Set the duration of the cross-fade between audio clips.", + ) + + audio_output = gr.Audio(label="Synthesized Audio") + spectrogram_output = gr.Image(label="Spectrogram") + + @gpu_decorator + def basic_tts( + ref_audio_input, + ref_text_input, + gen_text_input, + remove_silence, + cross_fade_duration_slider, + nfe_slider, + speed_slider, + ): + audio_out, spectrogram_path, ref_text_out = infer( + ref_audio_input, + ref_text_input, + gen_text_input, + tts_model_choice, + remove_silence, + cross_fade_duration=cross_fade_duration_slider, + nfe_step=nfe_slider, + speed=speed_slider, + ) + return audio_out, spectrogram_path, ref_text_out + + generate_btn.click( + basic_tts, + inputs=[ + ref_audio_input, + ref_text_input, + gen_text_input, + remove_silence, + cross_fade_duration_slider, + nfe_slider, + speed_slider, + ], + outputs=[audio_output, spectrogram_output, ref_text_input], + ) + + +def parse_speechtypes_text(gen_text): + # Pattern to find {speechtype} + pattern = r"\{(.*?)\}" + + # Split the text by the pattern + tokens = re.split(pattern, gen_text) + + segments = [] + + current_style = "Regular" + + for i in range(len(tokens)): + if i % 2 == 0: + # This is text + text = tokens[i].strip() + if text: + segments.append({"style": current_style, "text": text}) + else: + # This is style + style = tokens[i].strip() + current_style = style + + return segments + + +with gr.Blocks() as app_multistyle: + # New section for multistyle generation + gr.Markdown( + """ + # Multiple Speech-Type Generation + + This section allows you to generate multiple speech types or multiple people's voices. Enter your text in the format shown below, and the system will generate speech using the appropriate type. If unspecified, the model will use the regular speech type. The current speech type will be used until the next speech type is specified. + """ + ) + + with gr.Row(): + gr.Markdown( + """ + **Example Input:** + {Regular} Hello, I'd like to order a sandwich please. + {Surprised} What do you mean you're out of bread? + {Sad} I really wanted a sandwich though... + {Angry} You know what, darn you and your little shop! + {Whisper} I'll just go back home and cry now. + {Shouting} Why me?! + """ + ) + + gr.Markdown( + """ + **Example Input 2:** + {Speaker1_Happy} Hello, I'd like to order a sandwich please. + {Speaker2_Regular} Sorry, we're out of bread. + {Speaker1_Sad} I really wanted a sandwich though... + {Speaker2_Whisper} I'll give you the last one I was hiding. + """ + ) + + gr.Markdown( + "Upload different audio clips for each speech type. The first speech type is mandatory. You can add additional speech types by clicking the 'Add Speech Type' button." 
+ ) + + # Regular speech type (mandatory) + with gr.Row() as regular_row: + with gr.Column(): + regular_name = gr.Textbox(value="Regular", label="Speech Type Name") + regular_insert = gr.Button("Insert Label", variant="secondary") + regular_audio = gr.Audio(label="Regular Reference Audio", type="filepath") + regular_ref_text = gr.Textbox(label="Reference Text (Regular)", lines=2) + + # Regular speech type (max 100) + max_speech_types = 100 + speech_type_rows = [regular_row] + speech_type_names = [regular_name] + speech_type_audios = [regular_audio] + speech_type_ref_texts = [regular_ref_text] + speech_type_delete_btns = [None] + speech_type_insert_btns = [regular_insert] + + # Additional speech types (99 more) + for i in range(max_speech_types - 1): + with gr.Row(visible=False) as row: + with gr.Column(): + name_input = gr.Textbox(label="Speech Type Name") + delete_btn = gr.Button("Delete Type", variant="secondary") + insert_btn = gr.Button("Insert Label", variant="secondary") + audio_input = gr.Audio(label="Reference Audio", type="filepath") + ref_text_input = gr.Textbox(label="Reference Text", lines=2) + speech_type_rows.append(row) + speech_type_names.append(name_input) + speech_type_audios.append(audio_input) + speech_type_ref_texts.append(ref_text_input) + speech_type_delete_btns.append(delete_btn) + speech_type_insert_btns.append(insert_btn) + + # Button to add speech type + add_speech_type_btn = gr.Button("Add Speech Type") + + # Keep track of autoincrement of speech types, no roll back + speech_type_count = 1 + + # Function to add a speech type + def add_speech_type_fn(): + row_updates = [gr.update() for _ in range(max_speech_types)] + global speech_type_count + if speech_type_count < max_speech_types: + row_updates[speech_type_count] = gr.update(visible=True) + speech_type_count += 1 + else: + gr.Warning("Exhausted maximum number of speech types. 
Consider restart the app.") + return row_updates + + add_speech_type_btn.click(add_speech_type_fn, outputs=speech_type_rows) + + # Function to delete a speech type + def delete_speech_type_fn(): + return gr.update(visible=False), None, None, None + + # Update delete button clicks + for i in range(1, len(speech_type_delete_btns)): + speech_type_delete_btns[i].click( + delete_speech_type_fn, + outputs=[speech_type_rows[i], speech_type_names[i], speech_type_audios[i], speech_type_ref_texts[i]], + ) + + # Text input for the prompt + gen_text_input_multistyle = gr.Textbox( + label="Text to Generate", + lines=10, + placeholder="Enter the script with speaker names (or emotion types) at the start of each block, e.g.:\n\n{Regular} Hello, I'd like to order a sandwich please.\n{Surprised} What do you mean you're out of bread?\n{Sad} I really wanted a sandwich though...\n{Angry} You know what, darn you and your little shop!\n{Whisper} I'll just go back home and cry now.\n{Shouting} Why me?!", + ) + + def make_insert_speech_type_fn(index): + def insert_speech_type_fn(current_text, speech_type_name): + current_text = current_text or "" + speech_type_name = speech_type_name or "None" + updated_text = current_text + f"{{{speech_type_name}}} " + return updated_text + + return insert_speech_type_fn + + for i, insert_btn in enumerate(speech_type_insert_btns): + insert_fn = make_insert_speech_type_fn(i) + insert_btn.click( + insert_fn, + inputs=[gen_text_input_multistyle, speech_type_names[i]], + outputs=gen_text_input_multistyle, + ) + + with gr.Accordion("Advanced Settings", open=False): + remove_silence_multistyle = gr.Checkbox( + label="Remove Silences", + value=True, + ) + + # Generate button + generate_multistyle_btn = gr.Button("Generate Multi-Style Speech", variant="primary") + + # Output audio + audio_output_multistyle = gr.Audio(label="Synthesized Audio") + + @gpu_decorator + def generate_multistyle_speech( + gen_text, + *args, + ): + speech_type_names_list = args[:max_speech_types] + speech_type_audios_list = args[max_speech_types : 2 * max_speech_types] + speech_type_ref_texts_list = args[2 * max_speech_types : 3 * max_speech_types] + remove_silence = args[3 * max_speech_types] + # Collect the speech types and their audios into a dict + speech_types = OrderedDict() + + ref_text_idx = 0 + for name_input, audio_input, ref_text_input in zip( + speech_type_names_list, speech_type_audios_list, speech_type_ref_texts_list + ): + if name_input and audio_input: + speech_types[name_input] = {"audio": audio_input, "ref_text": ref_text_input} + else: + speech_types[f"@{ref_text_idx}@"] = {"audio": "", "ref_text": ""} + ref_text_idx += 1 + + # Parse the gen_text into segments + segments = parse_speechtypes_text(gen_text) + + # For each segment, generate speech + generated_audio_segments = [] + current_style = "Regular" + + for segment in segments: + style = segment["style"] + text = segment["text"] + + if style in speech_types: + current_style = style + else: + gr.Warning(f"Type {style} is not available, will use Regular as default.") + current_style = "Regular" + + try: + ref_audio = speech_types[current_style]["audio"] + except KeyError: + gr.Warning(f"Please provide reference audio for type {current_style}.") + return [None] + [speech_types[style]["ref_text"] for style in speech_types] + ref_text = speech_types[current_style].get("ref_text", "") + + # Generate speech for this segment + audio_out, _, ref_text_out = infer( + ref_audio, ref_text, text, tts_model_choice, remove_silence, 0, show_info=print + ) # 
show_info=print no pull to top when generating + sr, audio_data = audio_out + + generated_audio_segments.append(audio_data) + speech_types[current_style]["ref_text"] = ref_text_out + + # Concatenate all audio segments + if generated_audio_segments: + final_audio_data = np.concatenate(generated_audio_segments) + return [(sr, final_audio_data)] + [speech_types[style]["ref_text"] for style in speech_types] + else: + gr.Warning("No audio generated.") + return [None] + [speech_types[style]["ref_text"] for style in speech_types] + + generate_multistyle_btn.click( + generate_multistyle_speech, + inputs=[ + gen_text_input_multistyle, + ] + + speech_type_names + + speech_type_audios + + speech_type_ref_texts + + [ + remove_silence_multistyle, + ], + outputs=[audio_output_multistyle] + speech_type_ref_texts, + ) + + # Validation function to disable Generate button if speech types are missing + def validate_speech_types(gen_text, regular_name, *args): + speech_type_names_list = args + + # Collect the speech types names + speech_types_available = set() + if regular_name: + speech_types_available.add(regular_name) + for name_input in speech_type_names_list: + if name_input: + speech_types_available.add(name_input) + + # Parse the gen_text to get the speech types used + segments = parse_speechtypes_text(gen_text) + speech_types_in_text = set(segment["style"] for segment in segments) + + # Check if all speech types in text are available + missing_speech_types = speech_types_in_text - speech_types_available + + if missing_speech_types: + # Disable the generate button + return gr.update(interactive=False) + else: + # Enable the generate button + return gr.update(interactive=True) + + gen_text_input_multistyle.change( + validate_speech_types, + inputs=[gen_text_input_multistyle, regular_name] + speech_type_names, + outputs=generate_multistyle_btn, + ) + + +with gr.Blocks() as app_chat: + gr.Markdown( + """ +# Voice Chat +Have a conversation with an AI using your reference voice! +1. Upload a reference audio clip and optionally its transcript. +2. Load the chat model. +3. Record your message through your microphone. +4. The AI will respond using the reference voice. 
+""" + ) + + if not USING_SPACES: + load_chat_model_btn = gr.Button("Load Chat Model", variant="primary") + + chat_interface_container = gr.Column(visible=False) + + @gpu_decorator + def load_chat_model(): + global chat_model_state, chat_tokenizer_state + if chat_model_state is None: + show_info = gr.Info + show_info("Loading chat model...") + model_name = "Qwen/Qwen2.5-3B-Instruct" + chat_model_state = AutoModelForCausalLM.from_pretrained( + model_name, torch_dtype="auto", device_map="auto" + ) + chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name) + show_info("Chat model loaded.") + + return gr.update(visible=False), gr.update(visible=True) + + load_chat_model_btn.click(load_chat_model, outputs=[load_chat_model_btn, chat_interface_container]) + + else: + chat_interface_container = gr.Column() + + if chat_model_state is None: + model_name = "Qwen/Qwen2.5-3B-Instruct" + chat_model_state = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto") + chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name) + + with chat_interface_container: + with gr.Row(): + with gr.Column(): + ref_audio_chat = gr.Audio(label="Reference Audio", type="filepath") + with gr.Column(): + with gr.Accordion("Advanced Settings", open=False): + remove_silence_chat = gr.Checkbox( + label="Remove Silences", + value=True, + ) + ref_text_chat = gr.Textbox( + label="Reference Text", + info="Optional: Leave blank to auto-transcribe", + lines=2, + ) + system_prompt_chat = gr.Textbox( + label="System Prompt", + value="You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.", + lines=2, + ) + + chatbot_interface = gr.Chatbot(label="Conversation") + + with gr.Row(): + with gr.Column(): + audio_input_chat = gr.Microphone( + label="Speak your message", + type="filepath", + ) + audio_output_chat = gr.Audio(autoplay=True) + with gr.Column(): + text_input_chat = gr.Textbox( + label="Type your message", + lines=1, + ) + send_btn_chat = gr.Button("Send Message") + clear_btn_chat = gr.Button("Clear Conversation") + + conversation_state = gr.State( + value=[ + { + "role": "system", + "content": "You are not an AI assistant, you are whoever the user says you are. You must stay in character. 
Keep your responses concise since they will be spoken out loud.", + } + ] + ) + + # Modify process_audio_input to use model and tokenizer from state + @gpu_decorator + def process_audio_input(audio_path, text, history, conv_state): + """Handle audio or text input from user""" + + if not audio_path and not text.strip(): + return history, conv_state, "" + + if audio_path: + text = preprocess_ref_audio_text(audio_path, text)[1] + + if not text.strip(): + return history, conv_state, "" + + conv_state.append({"role": "user", "content": text}) + history.append((text, None)) + + response = generate_response(conv_state, chat_model_state, chat_tokenizer_state) + + conv_state.append({"role": "assistant", "content": response}) + history[-1] = (text, response) + + return history, conv_state, "" + + @gpu_decorator + def generate_audio_response(history, ref_audio, ref_text, remove_silence): + """Generate TTS audio for AI response""" + if not history or not ref_audio: + return None + + last_user_message, last_ai_response = history[-1] + if not last_ai_response: + return None + + audio_result, _, ref_text_out = infer( + ref_audio, + ref_text, + last_ai_response, + tts_model_choice, + remove_silence, + cross_fade_duration=0.15, + speed=1.0, + show_info=print, # show_info=print no pull to top when generating + ) + return audio_result, ref_text_out + + def clear_conversation(): + """Reset the conversation""" + return [], [ + { + "role": "system", + "content": "You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.", + } + ] + + def update_system_prompt(new_prompt): + """Update the system prompt and reset the conversation""" + new_conv_state = [{"role": "system", "content": new_prompt}] + return [], new_conv_state + + # Handle audio input + audio_input_chat.stop_recording( + process_audio_input, + inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state], + outputs=[chatbot_interface, conversation_state], + ).then( + generate_audio_response, + inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, remove_silence_chat], + outputs=[audio_output_chat, ref_text_chat], + ).then( + lambda: None, + None, + audio_input_chat, + ) + + # Handle text input + text_input_chat.submit( + process_audio_input, + inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state], + outputs=[chatbot_interface, conversation_state], + ).then( + generate_audio_response, + inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, remove_silence_chat], + outputs=[audio_output_chat, ref_text_chat], + ).then( + lambda: None, + None, + text_input_chat, + ) + + # Handle send button + send_btn_chat.click( + process_audio_input, + inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state], + outputs=[chatbot_interface, conversation_state], + ).then( + generate_audio_response, + inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, remove_silence_chat], + outputs=[audio_output_chat, ref_text_chat], + ).then( + lambda: None, + None, + text_input_chat, + ) + + # Handle clear button + clear_btn_chat.click( + clear_conversation, + outputs=[chatbot_interface, conversation_state], + ) + + # Handle system prompt change and reset conversation + system_prompt_chat.change( + update_system_prompt, + inputs=system_prompt_chat, + outputs=[chatbot_interface, conversation_state], + ) + + +with gr.Blocks() as app: + gr.Markdown( + f""" +# E2/F5 TTS + +This is {"a local web UI for [F5 
TTS](https://github.com/SWivid/F5-TTS)" if not USING_SPACES else "an online demo for [F5-TTS](https://github.com/SWivid/F5-TTS)"} with advanced batch processing support. This app supports the following TTS models: + +* [F5-TTS](https://arxiv.org/abs/2410.06885) (A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching) +* [E2 TTS](https://arxiv.org/abs/2406.18009) (Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS) + +The checkpoints currently support English and Chinese. + +If you're having issues, try converting your reference audio to WAV or MP3, clipping it to 15s with ✂ in the bottom right corner (otherwise might have non-optimal auto-trimmed result). + +**NOTE: Reference text will be automatically transcribed with Whisper if not provided. For best results, keep your reference clips short (<15s). Ensure the audio is fully uploaded before generating.** +""" + ) + + last_used_custom = files("f5_tts").joinpath("infer/.cache/last_used_custom_model_info_v1.txt") + + def load_last_used_custom(): + try: + custom = [] + with open(last_used_custom, "r", encoding="utf-8") as f: + for line in f: + custom.append(line.strip()) + return custom + except FileNotFoundError: + last_used_custom.parent.mkdir(parents=True, exist_ok=True) + return DEFAULT_TTS_MODEL_CFG + + def switch_tts_model(new_choice): + global tts_model_choice + if new_choice == "Custom": # override in case webpage is refreshed + custom_ckpt_path, custom_vocab_path, custom_model_cfg = load_last_used_custom() + tts_model_choice = ["Custom", custom_ckpt_path, custom_vocab_path, json.loads(custom_model_cfg)] + return ( + gr.update(visible=True, value=custom_ckpt_path), + gr.update(visible=True, value=custom_vocab_path), + gr.update(visible=True, value=custom_model_cfg), + ) + else: + tts_model_choice = new_choice + return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) + + def set_custom_model(custom_ckpt_path, custom_vocab_path, custom_model_cfg): + global tts_model_choice + tts_model_choice = ["Custom", custom_ckpt_path, custom_vocab_path, json.loads(custom_model_cfg)] + with open(last_used_custom, "w", encoding="utf-8") as f: + f.write(custom_ckpt_path + "\n" + custom_vocab_path + "\n" + custom_model_cfg + "\n") + + with gr.Row(): + if not USING_SPACES: + choose_tts_model = gr.Radio( + choices=[DEFAULT_TTS_MODEL, "E2-TTS", "Custom"], label="Choose TTS Model", value=DEFAULT_TTS_MODEL + ) + else: + choose_tts_model = gr.Radio( + choices=[DEFAULT_TTS_MODEL, "E2-TTS"], label="Choose TTS Model", value=DEFAULT_TTS_MODEL + ) + custom_ckpt_path = gr.Dropdown( + choices=[DEFAULT_TTS_MODEL_CFG[0]], + value=load_last_used_custom()[0], + allow_custom_value=True, + label="Model: local_path | hf://user_id/repo_id/model_ckpt", + visible=False, + ) + custom_vocab_path = gr.Dropdown( + choices=[DEFAULT_TTS_MODEL_CFG[1]], + value=load_last_used_custom()[1], + allow_custom_value=True, + label="Vocab: local_path | hf://user_id/repo_id/vocab_file", + visible=False, + ) + custom_model_cfg = gr.Dropdown( + choices=[ + DEFAULT_TTS_MODEL_CFG[2], + json.dumps( + dict( + dim=1024, + depth=22, + heads=16, + ff_mult=2, + text_dim=512, + text_mask_padding=False, + conv_layers=4, + pe_attn_head=1, + ) + ), + json.dumps( + dict( + dim=768, + depth=18, + heads=12, + ff_mult=2, + text_dim=512, + text_mask_padding=False, + conv_layers=4, + pe_attn_head=1, + ) + ), + ], + value=load_last_used_custom()[2], + allow_custom_value=True, + label="Config: in a dictionary form", + visible=False, + ) + + choose_tts_model.change( 
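+        # choosing "Custom" reveals the checkpoint/vocab/config fields pre-filled with the
+        # last-used values; any other selection hides them again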
+ switch_tts_model, + inputs=[choose_tts_model], + outputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], + show_progress="hidden", + ) + custom_ckpt_path.change( + set_custom_model, + inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], + show_progress="hidden", + ) + custom_vocab_path.change( + set_custom_model, + inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], + show_progress="hidden", + ) + custom_model_cfg.change( + set_custom_model, + inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], + show_progress="hidden", + ) + + gr.TabbedInterface( + [app_tts, app_multistyle, app_chat, app_credits], + ["Basic-TTS", "Multi-Speech", "Voice-Chat", "Credits"], + ) + + +@click.command() +@click.option("--port", "-p", default=None, type=int, help="Port to run the app on") +@click.option("--host", "-H", default=None, help="Host to run the app on") +@click.option( + "--share", + "-s", + default=False, + is_flag=True, + help="Share the app via Gradio share link", +) +@click.option("--api", "-a", default=True, is_flag=True, help="Allow API access") +@click.option( + "--root_path", + "-r", + default=None, + type=str, + help='The root path (or "mount point") of the application, if it\'s not served from the root ("/") of the domain. Often used when the application is behind a reverse proxy that forwards requests to the application, e.g. set "/myapp" or full URL for application served at "https://example.com/myapp".', +) +@click.option( + "--inbrowser", + "-i", + is_flag=True, + default=False, + help="Automatically launch the interface in the default web browser", +) +def main(port, host, share, api, root_path, inbrowser): + global app + print("Starting app...") + app.queue(api_open=api).launch( + server_name=host, + server_port=port, + share=share, + show_api=api, + root_path=root_path, + inbrowser=inbrowser, + ) + + +if __name__ == "__main__": + if not USING_SPACES: + main() + else: + app.queue().launch() diff --git a/src/f5_tts/infer/speech_edit.py b/src/f5_tts/infer/speech_edit.py new file mode 100644 index 0000000000000000000000000000000000000000..d8d073eadaa2bf14ceba43c04acc7453eb662ee8 --- /dev/null +++ b/src/f5_tts/infer/speech_edit.py @@ -0,0 +1,200 @@ +import os + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # for MPS device compatibility + +from importlib.resources import files + +import torch +import torch.nn.functional as F +import torchaudio +from omegaconf import OmegaConf + +from f5_tts.infer.utils_infer import load_checkpoint, load_vocoder, save_spectrogram +from f5_tts.model import CFM, DiT, UNetT # noqa: F401. 
used for config +from f5_tts.model.utils import convert_char_to_pinyin, get_tokenizer + +device = ( + "cuda" + if torch.cuda.is_available() + else "xpu" + if torch.xpu.is_available() + else "mps" + if torch.backends.mps.is_available() + else "cpu" +) + + +# ---------------------- infer setting ---------------------- # + +seed = None # int | None + +exp_name = "F5TTS_v1_Base" # F5TTS_v1_Base | E2TTS_Base +ckpt_step = 1250000 + +nfe_step = 32 # 16, 32 +cfg_strength = 2.0 +ode_method = "euler" # euler | midpoint +sway_sampling_coef = -1.0 +speed = 1.0 +target_rms = 0.1 + + +model_cfg = OmegaConf.load(str(files("f5_tts").joinpath(f"configs/{exp_name}.yaml"))) +model_cls = globals()[model_cfg.model.backbone] +model_arc = model_cfg.model.arch + +dataset_name = model_cfg.datasets.name +tokenizer = model_cfg.model.tokenizer + +mel_spec_type = model_cfg.model.mel_spec.mel_spec_type +target_sample_rate = model_cfg.model.mel_spec.target_sample_rate +n_mel_channels = model_cfg.model.mel_spec.n_mel_channels +hop_length = model_cfg.model.mel_spec.hop_length +win_length = model_cfg.model.mel_spec.win_length +n_fft = model_cfg.model.mel_spec.n_fft + + +ckpt_path = str(files("f5_tts").joinpath("../../")) + f"ckpts/{exp_name}/model_{ckpt_step}.safetensors" +output_dir = "tests" + + +# [leverage https://github.com/MahmoudAshraf97/ctc-forced-aligner to get char level alignment] +# pip install git+https://github.com/MahmoudAshraf97/ctc-forced-aligner.git +# [write the origin_text into a file, e.g. tests/test_edit.txt] +# ctc-forced-aligner --audio_path "src/f5_tts/infer/examples/basic/basic_ref_en.wav" --text_path "tests/test_edit.txt" --language "zho" --romanize --split_size "char" +# [result will be saved at same path of audio file] +# [--language "zho" for Chinese, "eng" for English] +# [if local ckpt, set --alignment_model "../checkpoints/mms-300m-1130-forced-aligner"] + +audio_to_edit = str(files("f5_tts").joinpath("infer/examples/basic/basic_ref_en.wav")) +origin_text = "Some call me nature, others call me mother nature." +target_text = "Some call me optimist, others call me realist." 
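+# The spans below (seconds in the source audio) are zeroed out and re-synthesized so the
+# edited words of target_text replace the originals; fix_duration pins the length of each
+# regenerated span (set fix_duration = None to keep the original segment durations).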
+parts_to_edit = [ + [1.42, 2.44], + [4.04, 4.9], +] # stard_ends of "nature" & "mother nature", in seconds +fix_duration = [ + 1.2, + 1, +] # fix duration for "optimist" & "realist", in seconds + +# audio_to_edit = "src/f5_tts/infer/examples/basic/basic_ref_zh.wav" +# origin_text = "对,这就是我,万人敬仰的太乙真人。" +# target_text = "对,那就是你,万人敬仰的太白金星。" +# parts_to_edit = [[0.84, 1.4], [1.92, 2.4], [4.26, 6.26], ] +# fix_duration = None # use origin text duration + + +# -------------------------------------------------# + +use_ema = True + +if not os.path.exists(output_dir): + os.makedirs(output_dir) + +# Vocoder model +local = False +if mel_spec_type == "vocos": + vocoder_local_path = "../checkpoints/charactr/vocos-mel-24khz" +elif mel_spec_type == "bigvgan": + vocoder_local_path = "../checkpoints/bigvgan_v2_24khz_100band_256x" +vocoder = load_vocoder(vocoder_name=mel_spec_type, is_local=local, local_path=vocoder_local_path) + +# Tokenizer +vocab_char_map, vocab_size = get_tokenizer(dataset_name, tokenizer) + +# Model +model = CFM( + transformer=model_cls(**model_arc, text_num_embeds=vocab_size, mel_dim=n_mel_channels), + mel_spec_kwargs=dict( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + n_mel_channels=n_mel_channels, + target_sample_rate=target_sample_rate, + mel_spec_type=mel_spec_type, + ), + odeint_kwargs=dict( + method=ode_method, + ), + vocab_char_map=vocab_char_map, +).to(device) + +dtype = torch.float32 if mel_spec_type == "bigvgan" else None +model = load_checkpoint(model, ckpt_path, device, dtype=dtype, use_ema=use_ema) + +# Audio +audio, sr = torchaudio.load(audio_to_edit) +if audio.shape[0] > 1: + audio = torch.mean(audio, dim=0, keepdim=True) +rms = torch.sqrt(torch.mean(torch.square(audio))) +if rms < target_rms: + audio = audio * target_rms / rms +if sr != target_sample_rate: + resampler = torchaudio.transforms.Resample(sr, target_sample_rate) + audio = resampler(audio) +offset = 0 +audio_ = torch.zeros(1, 0) +edit_mask = torch.zeros(1, 0, dtype=torch.bool) +for part in parts_to_edit: + start, end = part + part_dur = end - start if fix_duration is None else fix_duration.pop(0) + part_dur = part_dur * target_sample_rate + start = start * target_sample_rate + audio_ = torch.cat((audio_, audio[:, round(offset) : round(start)], torch.zeros(1, round(part_dur))), dim=-1) + edit_mask = torch.cat( + ( + edit_mask, + torch.ones(1, round((start - offset) / hop_length), dtype=torch.bool), + torch.zeros(1, round(part_dur / hop_length), dtype=torch.bool), + ), + dim=-1, + ) + offset = end * target_sample_rate +# audio = torch.cat((audio_, audio[:, round(offset):]), dim = -1) +edit_mask = F.pad(edit_mask, (0, audio.shape[-1] // hop_length - edit_mask.shape[-1] + 1), value=True) +audio = audio.to(device) +edit_mask = edit_mask.to(device) + +# Text +text_list = [target_text] +if tokenizer == "pinyin": + final_text_list = convert_char_to_pinyin(text_list) +else: + final_text_list = [text_list] +print(f"text : {text_list}") +print(f"pinyin: {final_text_list}") + +# Duration +ref_audio_len = 0 +duration = audio.shape[-1] // hop_length + +# Inference +with torch.inference_mode(): + generated, trajectory = model.sample( + cond=audio, + text=final_text_list, + duration=duration, + steps=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + seed=seed, + edit_mask=edit_mask, + ) + print(f"Generated mel: {generated.shape}") + + # Final result + generated = generated.to(torch.float32) + generated = generated[:, ref_audio_len:, :] + gen_mel_spec = 
generated.permute(0, 2, 1) + if mel_spec_type == "vocos": + generated_wave = vocoder.decode(gen_mel_spec).cpu() + elif mel_spec_type == "bigvgan": + generated_wave = vocoder(gen_mel_spec).squeeze(0).cpu() + + if rms < target_rms: + generated_wave = generated_wave * rms / target_rms + + save_spectrogram(gen_mel_spec[0].cpu().numpy(), f"{output_dir}/speech_edit_out.png") + torchaudio.save(f"{output_dir}/speech_edit_out.wav", generated_wave, target_sample_rate) + print(f"Generated wav: {generated_wave.shape}") diff --git a/src/f5_tts/infer/utils_infer.py b/src/f5_tts/infer/utils_infer.py new file mode 100644 index 0000000000000000000000000000000000000000..b2fd72719dbbb511480216538e4999a8735095ba --- /dev/null +++ b/src/f5_tts/infer/utils_infer.py @@ -0,0 +1,589 @@ +# A unified script for inference process +# Make adjustments inside functions, and consider both gradio and cli scripts if need to change func output format +import os +import sys +from concurrent.futures import ThreadPoolExecutor + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # for MPS device compatibility +sys.path.append(f"{os.path.dirname(os.path.abspath(__file__))}/../../third_party/BigVGAN/") + +import hashlib +import re +import tempfile +from importlib.resources import files + +import matplotlib + +matplotlib.use("Agg") + +import matplotlib.pylab as plt +import numpy as np +import torch +import torchaudio +import tqdm +from huggingface_hub import snapshot_download, hf_hub_download +from pydub import AudioSegment, silence +from transformers import pipeline +from vocos import Vocos + +from f5_tts.model import CFM +from f5_tts.model.utils import ( + get_tokenizer, + convert_char_to_pinyin, +) + +_ref_audio_cache = {} + +device = ( + "cuda" + if torch.cuda.is_available() + else "xpu" + if torch.xpu.is_available() + else "mps" + if torch.backends.mps.is_available() + else "cpu" +) + +# ----------------------------------------- + +target_sample_rate = 24000 +n_mel_channels = 100 +hop_length = 256 +win_length = 1024 +n_fft = 1024 +mel_spec_type = "vocos" +target_rms = 0.1 +cross_fade_duration = 0.15 +ode_method = "euler" +nfe_step = 32 # 16, 32 +cfg_strength = 2.0 +sway_sampling_coef = -1.0 +speed = 1.0 +fix_duration = None + +# ----------------------------------------- + + +# chunk text into smaller pieces + + +def chunk_text(text, max_chars=135): + """ + Splits the input text into chunks, each with a maximum number of characters. + + Args: + text (str): The text to be split. + max_chars (int): The maximum number of characters per chunk. + + Returns: + List[str]: A list of text chunks. 
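+
+        Example (illustrative; lengths are counted in UTF-8 bytes, so multi-byte characters
+        count as more than one "char"):
+            chunk_text("Hello there. How are you today? I'm fine.", max_chars=20)
+            returns ["Hello there.", "How are you today?", "I'm fine."]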
+ """ + chunks = [] + current_chunk = "" + # Split the text into sentences based on punctuation followed by whitespace + sentences = re.split(r"(?<=[;:,.!?])\s+|(?<=[;:,。!?])", text) + + for sentence in sentences: + if len(current_chunk.encode("utf-8")) + len(sentence.encode("utf-8")) <= max_chars: + current_chunk += sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence + else: + if current_chunk: + chunks.append(current_chunk.strip()) + current_chunk = sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence + + if current_chunk: + chunks.append(current_chunk.strip()) + + return chunks + + +# load vocoder +def load_vocoder(vocoder_name="vocos", is_local=False, local_path="", device=device, hf_cache_dir=None): + if vocoder_name == "vocos": + # vocoder = Vocos.from_pretrained("charactr/vocos-mel-24khz").to(device) + if is_local: + print(f"Load vocos from local path {local_path}") + config_path = f"{local_path}/config.yaml" + model_path = f"{local_path}/pytorch_model.bin" + else: + print("Download Vocos from huggingface charactr/vocos-mel-24khz") + repo_id = "charactr/vocos-mel-24khz" + config_path = hf_hub_download(repo_id=repo_id, cache_dir=hf_cache_dir, filename="config.yaml") + model_path = hf_hub_download(repo_id=repo_id, cache_dir=hf_cache_dir, filename="pytorch_model.bin") + vocoder = Vocos.from_hparams(config_path) + state_dict = torch.load(model_path, map_location="cpu", weights_only=True) + from vocos.feature_extractors import EncodecFeatures + + if isinstance(vocoder.feature_extractor, EncodecFeatures): + encodec_parameters = { + "feature_extractor.encodec." + key: value + for key, value in vocoder.feature_extractor.encodec.state_dict().items() + } + state_dict.update(encodec_parameters) + vocoder.load_state_dict(state_dict) + vocoder = vocoder.eval().to(device) + elif vocoder_name == "bigvgan": + try: + from third_party.BigVGAN import bigvgan + except ImportError: + print("You need to follow the README to init submodule and change the BigVGAN source code.") + if is_local: + """download from https://huggingface.co/nvidia/bigvgan_v2_24khz_100band_256x/tree/main""" + vocoder = bigvgan.BigVGAN.from_pretrained(local_path, use_cuda_kernel=False) + else: + local_path = snapshot_download(repo_id="nvidia/bigvgan_v2_24khz_100band_256x", cache_dir=hf_cache_dir) + vocoder = bigvgan.BigVGAN.from_pretrained(local_path, use_cuda_kernel=False) + + vocoder.remove_weight_norm() + vocoder = vocoder.eval().to(device) + return vocoder + + +# load asr pipeline + +asr_pipe = None + + +def initialize_asr_pipeline(device: str = device, dtype=None): + if dtype is None: + dtype = ( + torch.float16 + if "cuda" in device + and torch.cuda.get_device_properties(device).major >= 6 + and not torch.cuda.get_device_name().endswith("[ZLUDA]") + else torch.float32 + ) + global asr_pipe + asr_pipe = pipeline( + "automatic-speech-recognition", + model="openai/whisper-large-v3-turbo", + torch_dtype=dtype, + device=device, + ) + + +# transcribe + + +def transcribe(ref_audio, language=None): + global asr_pipe + if asr_pipe is None: + initialize_asr_pipeline(device=device) + return asr_pipe( + ref_audio, + chunk_length_s=30, + batch_size=128, + generate_kwargs={"task": "transcribe", "language": language} if language else {"task": "transcribe"}, + return_timestamps=False, + )["text"].strip() + + +# load model checkpoint for inference + + +def load_checkpoint(model, ckpt_path, device: str, dtype=None, use_ema=True): + if dtype is None: + dtype = ( + torch.float16 + if 
"cuda" in device + and torch.cuda.get_device_properties(device).major >= 6 + and not torch.cuda.get_device_name().endswith("[ZLUDA]") + else torch.float32 + ) + model = model.to(dtype) + + ckpt_type = ckpt_path.split(".")[-1] + if ckpt_type == "safetensors": + from safetensors.torch import load_file + + checkpoint = load_file(ckpt_path, device=device) + else: + checkpoint = torch.load(ckpt_path, map_location=device, weights_only=True) + + if use_ema: + if ckpt_type == "safetensors": + checkpoint = {"ema_model_state_dict": checkpoint} + checkpoint["model_state_dict"] = { + k.replace("ema_model.", ""): v + for k, v in checkpoint["ema_model_state_dict"].items() + if k not in ["initted", "step"] + } + + # patch for backward compatibility, 305e3ea + for key in ["mel_spec.mel_stft.mel_scale.fb", "mel_spec.mel_stft.spectrogram.window"]: + if key in checkpoint["model_state_dict"]: + del checkpoint["model_state_dict"][key] + + model.load_state_dict(checkpoint["model_state_dict"]) + else: + if ckpt_type == "safetensors": + checkpoint = {"model_state_dict": checkpoint} + model.load_state_dict(checkpoint["model_state_dict"]) + + del checkpoint + torch.cuda.empty_cache() + + return model.to(device) + + +# load model for inference + + +def load_model( + model_cls, + model_cfg, + ckpt_path, + mel_spec_type=mel_spec_type, + vocab_file="", + ode_method=ode_method, + use_ema=True, + device=device, +): + if vocab_file == "": + vocab_file = str(files("f5_tts").joinpath("infer/examples/vocab.txt")) + tokenizer = "custom" + + print("\nvocab : ", vocab_file) + print("token : ", tokenizer) + print("model : ", ckpt_path, "\n") + + vocab_char_map, vocab_size = get_tokenizer(vocab_file, tokenizer) + model = CFM( + transformer=model_cls(**model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels), + mel_spec_kwargs=dict( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + n_mel_channels=n_mel_channels, + target_sample_rate=target_sample_rate, + mel_spec_type=mel_spec_type, + ), + odeint_kwargs=dict( + method=ode_method, + ), + vocab_char_map=vocab_char_map, + ).to(device) + + dtype = torch.float32 if mel_spec_type == "bigvgan" else None + model = load_checkpoint(model, ckpt_path, device, dtype=dtype, use_ema=use_ema) + + return model + + +def remove_silence_edges(audio, silence_threshold=-42): + # Remove silence from the start + non_silent_start_idx = silence.detect_leading_silence(audio, silence_threshold=silence_threshold) + audio = audio[non_silent_start_idx:] + + # Remove silence from the end + non_silent_end_duration = audio.duration_seconds + for ms in reversed(audio): + if ms.dBFS > silence_threshold: + break + non_silent_end_duration -= 0.001 + trimmed_audio = audio[: int(non_silent_end_duration * 1000)] + + return trimmed_audio + + +# preprocess reference audio and text + + +def preprocess_ref_audio_text(ref_audio_orig, ref_text, clip_short=True, show_info=print, device=device): + show_info("Converting audio...") + with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f: + aseg = AudioSegment.from_file(ref_audio_orig) + + if clip_short: + # 1. try to find long silence for clipping + non_silent_segs = silence.split_on_silence( + aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=1000, seek_step=10 + ) + non_silent_wave = AudioSegment.silent(duration=0) + for non_silent_seg in non_silent_segs: + if len(non_silent_wave) > 6000 and len(non_silent_wave + non_silent_seg) > 12000: + show_info("Audio is over 15s, clipping short. 
(1)") + break + non_silent_wave += non_silent_seg + + # 2. try to find short silence for clipping if 1. failed + if len(non_silent_wave) > 12000: + non_silent_segs = silence.split_on_silence( + aseg, min_silence_len=100, silence_thresh=-40, keep_silence=1000, seek_step=10 + ) + non_silent_wave = AudioSegment.silent(duration=0) + for non_silent_seg in non_silent_segs: + if len(non_silent_wave) > 6000 and len(non_silent_wave + non_silent_seg) > 12000: + show_info("Audio is over 15s, clipping short. (2)") + break + non_silent_wave += non_silent_seg + + aseg = non_silent_wave + + # 3. if no proper silence found for clipping + if len(aseg) > 12000: + aseg = aseg[:12000] + show_info("Audio is over 15s, clipping short. (3)") + + aseg = remove_silence_edges(aseg) + AudioSegment.silent(duration=50) + aseg.export(f.name, format="wav") + ref_audio = f.name + + # Compute a hash of the reference audio file + with open(ref_audio, "rb") as audio_file: + audio_data = audio_file.read() + audio_hash = hashlib.md5(audio_data).hexdigest() + + if not ref_text.strip(): + global _ref_audio_cache + if audio_hash in _ref_audio_cache: + # Use cached asr transcription + show_info("Using cached reference text...") + ref_text = _ref_audio_cache[audio_hash] + else: + show_info("No reference text provided, transcribing reference audio...") + ref_text = transcribe(ref_audio) + # Cache the transcribed text (not caching custom ref_text, enabling users to do manual tweak) + _ref_audio_cache[audio_hash] = ref_text + else: + show_info("Using custom reference text...") + + # Ensure ref_text ends with a proper sentence-ending punctuation + if not ref_text.endswith(". ") and not ref_text.endswith("。"): + if ref_text.endswith("."): + ref_text += " " + else: + ref_text += ". " + + print("\nref_text ", ref_text) + + return ref_audio, ref_text + + +# infer process: chunk text -> infer batches [i.e. 
infer_batch_process()] + + +def infer_process( + ref_audio, + ref_text, + gen_text, + model_obj, + vocoder, + mel_spec_type=mel_spec_type, + show_info=print, + progress=tqdm, + target_rms=target_rms, + cross_fade_duration=cross_fade_duration, + nfe_step=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + speed=speed, + fix_duration=fix_duration, + device=device, +): + # Split the input text into batches + audio, sr = torchaudio.load(ref_audio) + max_chars = int(len(ref_text.encode("utf-8")) / (audio.shape[-1] / sr) * (22 - audio.shape[-1] / sr)) + gen_text_batches = chunk_text(gen_text, max_chars=max_chars) + for i, gen_text in enumerate(gen_text_batches): + print(f"gen_text {i}", gen_text) + print("\n") + + show_info(f"Generating audio in {len(gen_text_batches)} batches...") + return next( + infer_batch_process( + (audio, sr), + ref_text, + gen_text_batches, + model_obj, + vocoder, + mel_spec_type=mel_spec_type, + progress=progress, + target_rms=target_rms, + cross_fade_duration=cross_fade_duration, + nfe_step=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + speed=speed, + fix_duration=fix_duration, + device=device, + ) + ) + + +# infer batches + + +def infer_batch_process( + ref_audio, + ref_text, + gen_text_batches, + model_obj, + vocoder, + mel_spec_type="vocos", + progress=tqdm, + target_rms=0.1, + cross_fade_duration=0.15, + nfe_step=32, + cfg_strength=2.0, + sway_sampling_coef=-1, + speed=1, + fix_duration=None, + device=None, + streaming=False, + chunk_size=2048, +): + audio, sr = ref_audio + if audio.shape[0] > 1: + audio = torch.mean(audio, dim=0, keepdim=True) + + rms = torch.sqrt(torch.mean(torch.square(audio))) + if rms < target_rms: + audio = audio * target_rms / rms + if sr != target_sample_rate: + resampler = torchaudio.transforms.Resample(sr, target_sample_rate) + audio = resampler(audio) + audio = audio.to(device) + + generated_waves = [] + spectrograms = [] + + if len(ref_text[-1].encode("utf-8")) == 1: + ref_text = ref_text + " " + + def process_batch(gen_text): + local_speed = speed + if len(gen_text.encode("utf-8")) < 10: + local_speed = 0.3 + + # Prepare the text + text_list = [ref_text + gen_text] + final_text_list = convert_char_to_pinyin(text_list) + + ref_audio_len = audio.shape[-1] // hop_length + if fix_duration is not None: + duration = int(fix_duration * target_sample_rate / hop_length) + else: + # Calculate duration + ref_text_len = len(ref_text.encode("utf-8")) + gen_text_len = len(gen_text.encode("utf-8")) + duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / local_speed) + + # inference + with torch.inference_mode(): + generated, _ = model_obj.sample( + cond=audio, + text=final_text_list, + duration=duration, + steps=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + ) + del _ + + generated = generated.to(torch.float32) # generated mel spectrogram + generated = generated[:, ref_audio_len:, :] + generated = generated.permute(0, 2, 1) + if mel_spec_type == "vocos": + generated_wave = vocoder.decode(generated) + elif mel_spec_type == "bigvgan": + generated_wave = vocoder(generated) + if rms < target_rms: + generated_wave = generated_wave * rms / target_rms + + # wav -> numpy + generated_wave = generated_wave.squeeze().cpu().numpy() + + if streaming: + for j in range(0, len(generated_wave), chunk_size): + yield generated_wave[j : j + chunk_size], target_sample_rate + else: + generated_cpu = generated[0].cpu().numpy() + del generated + yield 
generated_wave, generated_cpu + + if streaming: + for gen_text in progress.tqdm(gen_text_batches) if progress is not None else gen_text_batches: + for chunk in process_batch(gen_text): + yield chunk + else: + with ThreadPoolExecutor() as executor: + futures = [executor.submit(process_batch, gen_text) for gen_text in gen_text_batches] + for future in progress.tqdm(futures) if progress is not None else futures: + result = future.result() + if result: + generated_wave, generated_mel_spec = next(result) + generated_waves.append(generated_wave) + spectrograms.append(generated_mel_spec) + + if generated_waves: + if cross_fade_duration <= 0: + # Simply concatenate + final_wave = np.concatenate(generated_waves) + else: + # Combine all generated waves with cross-fading + final_wave = generated_waves[0] + for i in range(1, len(generated_waves)): + prev_wave = final_wave + next_wave = generated_waves[i] + + # Calculate cross-fade samples, ensuring it does not exceed wave lengths + cross_fade_samples = int(cross_fade_duration * target_sample_rate) + cross_fade_samples = min(cross_fade_samples, len(prev_wave), len(next_wave)) + + if cross_fade_samples <= 0: + # No overlap possible, concatenate + final_wave = np.concatenate([prev_wave, next_wave]) + continue + + # Overlapping parts + prev_overlap = prev_wave[-cross_fade_samples:] + next_overlap = next_wave[:cross_fade_samples] + + # Fade out and fade in + fade_out = np.linspace(1, 0, cross_fade_samples) + fade_in = np.linspace(0, 1, cross_fade_samples) + + # Cross-faded overlap + cross_faded_overlap = prev_overlap * fade_out + next_overlap * fade_in + + # Combine + new_wave = np.concatenate( + [prev_wave[:-cross_fade_samples], cross_faded_overlap, next_wave[cross_fade_samples:]] + ) + + final_wave = new_wave + + # Create a combined spectrogram + combined_spectrogram = np.concatenate(spectrograms, axis=1) + + yield final_wave, target_sample_rate, combined_spectrogram + + else: + yield None, target_sample_rate, None + + +# remove silence from generated wav + + +def remove_silence_for_generated_wav(filename): + aseg = AudioSegment.from_file(filename) + non_silent_segs = silence.split_on_silence( + aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500, seek_step=10 + ) + non_silent_wave = AudioSegment.silent(duration=0) + for non_silent_seg in non_silent_segs: + non_silent_wave += non_silent_seg + aseg = non_silent_wave + aseg.export(filename, format="wav") + + +# save spectrogram + + +def save_spectrogram(spectrogram, path): + plt.figure(figsize=(12, 4)) + plt.imshow(spectrogram, origin="lower", aspect="auto") + plt.colorbar() + plt.savefig(path) + plt.close() diff --git a/src/f5_tts/model/__init__.py b/src/f5_tts/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59cf691c9f73f357dd17b43faf08d549dcbb9550 --- /dev/null +++ b/src/f5_tts/model/__init__.py @@ -0,0 +1,10 @@ +from f5_tts.model.cfm import CFM + +from f5_tts.model.backbones.unett import UNetT +from f5_tts.model.backbones.dit import DiT +from f5_tts.model.backbones.mmdit import MMDiT + +from f5_tts.model.trainer import Trainer + + +__all__ = ["CFM", "UNetT", "DiT", "MMDiT", "Trainer"] diff --git a/src/f5_tts/model/__pycache__/__init__.cpython-310.pyc b/src/f5_tts/model/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b7de0f3307f1dfb8c09ddb582190c551089e055 Binary files /dev/null and b/src/f5_tts/model/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/src/f5_tts/model/__pycache__/cfm.cpython-310.pyc b/src/f5_tts/model/__pycache__/cfm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f54036c2e1ba440024de31a309635a82d31987b2 Binary files /dev/null and b/src/f5_tts/model/__pycache__/cfm.cpython-310.pyc differ diff --git a/src/f5_tts/model/__pycache__/dataset.cpython-310.pyc b/src/f5_tts/model/__pycache__/dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69799df78cb33930036a86b4127370a2cc4001fa Binary files /dev/null and b/src/f5_tts/model/__pycache__/dataset.cpython-310.pyc differ diff --git a/src/f5_tts/model/__pycache__/modules.cpython-310.pyc b/src/f5_tts/model/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57bb3dce3fefa89b816913039d9edf736f310446 Binary files /dev/null and b/src/f5_tts/model/__pycache__/modules.cpython-310.pyc differ diff --git a/src/f5_tts/model/__pycache__/trainer.cpython-310.pyc b/src/f5_tts/model/__pycache__/trainer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43d5dfa4fd68c757af3606ff0eacf8630c7c4f31 Binary files /dev/null and b/src/f5_tts/model/__pycache__/trainer.cpython-310.pyc differ diff --git a/src/f5_tts/model/__pycache__/utils.cpython-310.pyc b/src/f5_tts/model/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3148092e20a606d9d64ab72bd92f56186f98970 Binary files /dev/null and b/src/f5_tts/model/__pycache__/utils.cpython-310.pyc differ diff --git a/src/f5_tts/model/backbones/README.md b/src/f5_tts/model/backbones/README.md new file mode 100644 index 0000000000000000000000000000000000000000..09bd4da5b51d3349b0136cec601d5d3ae9ed92f0 --- /dev/null +++ b/src/f5_tts/model/backbones/README.md @@ -0,0 +1,20 @@ +## Backbones quick introduction + + +### unett.py +- flat unet transformer +- structure same as in e2-tts & voicebox paper except using rotary pos emb +- possible abs pos emb & convnextv2 blocks for embedded text before concat + +### dit.py +- adaln-zero dit +- embedded timestep as condition +- concatted noised_input + masked_cond + embedded_text, linear proj in +- possible abs pos emb & convnextv2 blocks for embedded text before concat +- possible long skip connection (first layer to last layer) + +### mmdit.py +- stable diffusion 3 block structure +- timestep as condition +- left stream: text embedded and applied a abs pos emb +- right stream: masked_cond & noised_input concatted and with same conv pos emb as unett diff --git a/src/f5_tts/model/backbones/__pycache__/dit.cpython-310.pyc b/src/f5_tts/model/backbones/__pycache__/dit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80b54da27d17217bda78ef33942c63828d5c96c3 Binary files /dev/null and b/src/f5_tts/model/backbones/__pycache__/dit.cpython-310.pyc differ diff --git a/src/f5_tts/model/backbones/__pycache__/mmdit.cpython-310.pyc b/src/f5_tts/model/backbones/__pycache__/mmdit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21ab2c1e5f874fb5bf870ebcca1b658bb99bed13 Binary files /dev/null and b/src/f5_tts/model/backbones/__pycache__/mmdit.cpython-310.pyc differ diff --git a/src/f5_tts/model/backbones/__pycache__/unett.cpython-310.pyc b/src/f5_tts/model/backbones/__pycache__/unett.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e483bf663b09f71396dbc8efe8d8127f22993e3a Binary files /dev/null and 
b/src/f5_tts/model/backbones/__pycache__/unett.cpython-310.pyc differ diff --git a/src/f5_tts/model/backbones/dit.py b/src/f5_tts/model/backbones/dit.py new file mode 100644 index 0000000000000000000000000000000000000000..c4625285acb0d2d6e1d9b8a154edce45a654df7d --- /dev/null +++ b/src/f5_tts/model/backbones/dit.py @@ -0,0 +1,232 @@ +""" +ein notation: +b - batch +n - sequence +nt - text sequence +nw - raw wave length +d - dimension +""" + +from __future__ import annotations + +import torch +from torch import nn +import torch.nn.functional as F + +from x_transformers.x_transformers import RotaryEmbedding + +from f5_tts.model.modules import ( + TimestepEmbedding, + ConvNeXtV2Block, + ConvPositionEmbedding, + DiTBlock, + AdaLayerNorm_Final, + precompute_freqs_cis, + get_pos_embed_indices, +) + + +# Text embedding + + +class TextEmbedding(nn.Module): + def __init__(self, text_num_embeds, text_dim, mask_padding=True, conv_layers=0, conv_mult=2): + super().__init__() + self.text_embed = nn.Embedding(text_num_embeds + 1, text_dim) # use 0 as filler token + + self.mask_padding = mask_padding # mask filler and batch padding tokens or not + + if conv_layers > 0: + self.extra_modeling = True + self.precompute_max_pos = 4096 # ~44s of 24khz audio + self.register_buffer("freqs_cis", precompute_freqs_cis(text_dim, self.precompute_max_pos), persistent=False) + self.text_blocks = nn.Sequential( + *[ConvNeXtV2Block(text_dim, text_dim * conv_mult) for _ in range(conv_layers)] + ) + else: + self.extra_modeling = False + + def forward(self, text: int["b nt"], seq_len, drop_text=False): # noqa: F722 + text = text + 1 # use 0 as filler token. preprocess of batch pad -1, see list_str_to_idx() + text = text[:, :seq_len] # curtail if character tokens are more than the mel spec tokens + batch, text_len = text.shape[0], text.shape[1] + text = F.pad(text, (0, seq_len - text_len), value=0) + if self.mask_padding: + text_mask = text == 0 + + if drop_text: # cfg for text + text = torch.zeros_like(text) + + text = self.text_embed(text) # b n -> b n d + + # possible extra modeling + if self.extra_modeling: + # sinus pos emb + batch_start = torch.zeros((batch,), dtype=torch.long) + pos_idx = get_pos_embed_indices(batch_start, seq_len, max_pos=self.precompute_max_pos) + text_pos_embed = self.freqs_cis[pos_idx] + text = text + text_pos_embed + + # convnextv2 blocks + if self.mask_padding: + text = text.masked_fill(text_mask.unsqueeze(-1).expand(-1, -1, text.size(-1)), 0.0) + for block in self.text_blocks: + text = block(text) + text = text.masked_fill(text_mask.unsqueeze(-1).expand(-1, -1, text.size(-1)), 0.0) + else: + text = self.text_blocks(text) + + return text + + +# noised input audio and context mixing embedding + + +class InputEmbedding(nn.Module): + def __init__(self, mel_dim, text_dim, out_dim): + super().__init__() + self.proj = nn.Linear(mel_dim * 2 + text_dim, out_dim) + self.conv_pos_embed = ConvPositionEmbedding(dim=out_dim) + + def forward(self, x: float["b n d"], cond: float["b n d"], text_embed: float["b n d"], drop_audio_cond=False): # noqa: F722 + if drop_audio_cond: # cfg for cond audio + cond = torch.zeros_like(cond) + + x = self.proj(torch.cat((x, cond, text_embed), dim=-1)) + x = self.conv_pos_embed(x) + x + return x + + +# Transformer backbone using DiT blocks + + +class DiT(nn.Module): + def __init__( + self, + *, + dim, + depth=8, + heads=8, + dim_head=64, + dropout=0.1, + ff_mult=4, + mel_dim=100, + text_num_embeds=256, + text_dim=None, + text_mask_padding=True, + qk_norm=None, + conv_layers=0, + 
pe_attn_head=None, + long_skip_connection=False, + checkpoint_activations=False, + ): + super().__init__() + + self.time_embed = TimestepEmbedding(dim) + if text_dim is None: + text_dim = mel_dim + self.text_embed = TextEmbedding( + text_num_embeds, text_dim, mask_padding=text_mask_padding, conv_layers=conv_layers + ) + self.text_cond, self.text_uncond = None, None # text cache + self.input_embed = InputEmbedding(mel_dim, text_dim, dim) + + self.rotary_embed = RotaryEmbedding(dim_head) + + self.dim = dim + self.depth = depth + + self.transformer_blocks = nn.ModuleList( + [ + DiTBlock( + dim=dim, + heads=heads, + dim_head=dim_head, + ff_mult=ff_mult, + dropout=dropout, + qk_norm=qk_norm, + pe_attn_head=pe_attn_head, + ) + for _ in range(depth) + ] + ) + self.long_skip_connection = nn.Linear(dim * 2, dim, bias=False) if long_skip_connection else None + + self.norm_out = AdaLayerNorm_Final(dim) # final modulation + self.proj_out = nn.Linear(dim, mel_dim) + + self.checkpoint_activations = checkpoint_activations + + self.initialize_weights() + + def initialize_weights(self): + # Zero-out AdaLN layers in DiT blocks: + for block in self.transformer_blocks: + nn.init.constant_(block.attn_norm.linear.weight, 0) + nn.init.constant_(block.attn_norm.linear.bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.norm_out.linear.weight, 0) + nn.init.constant_(self.norm_out.linear.bias, 0) + nn.init.constant_(self.proj_out.weight, 0) + nn.init.constant_(self.proj_out.bias, 0) + + def ckpt_wrapper(self, module): + # https://github.com/chuanyangjin/fast-DiT/blob/main/models.py + def ckpt_forward(*inputs): + outputs = module(*inputs) + return outputs + + return ckpt_forward + + def clear_cache(self): + self.text_cond, self.text_uncond = None, None + + def forward( + self, + x: float["b n d"], # nosied input audio # noqa: F722 + cond: float["b n d"], # masked cond audio # noqa: F722 + text: int["b nt"], # text # noqa: F722 + time: float["b"] | float[""], # time step # noqa: F821 F722 + drop_audio_cond, # cfg for cond audio + drop_text, # cfg for text + mask: bool["b n"] | None = None, # noqa: F722 + cache=False, + ): + batch, seq_len = x.shape[0], x.shape[1] + if time.ndim == 0: + time = time.repeat(batch) + + # t: conditioning time, text: text, x: noised audio + cond audio + text + t = self.time_embed(time) + if cache: + if drop_text: + if self.text_uncond is None: + self.text_uncond = self.text_embed(text, seq_len, drop_text=True) + text_embed = self.text_uncond + else: + if self.text_cond is None: + self.text_cond = self.text_embed(text, seq_len, drop_text=False) + text_embed = self.text_cond + else: + text_embed = self.text_embed(text, seq_len, drop_text=drop_text) + x = self.input_embed(x, cond, text_embed, drop_audio_cond=drop_audio_cond) + + rope = self.rotary_embed.forward_from_seq_len(seq_len) + + if self.long_skip_connection is not None: + residual = x + + for block in self.transformer_blocks: + if self.checkpoint_activations: + x = torch.utils.checkpoint.checkpoint(self.ckpt_wrapper(block), x, t, mask, rope) + else: + x = block(x, t, mask=mask, rope=rope) + + if self.long_skip_connection is not None: + x = self.long_skip_connection(torch.cat((x, residual), dim=-1)) + + x = self.norm_out(x, t) + output = self.proj_out(x) + + return output diff --git a/src/f5_tts/model/backbones/mmdit.py b/src/f5_tts/model/backbones/mmdit.py new file mode 100644 index 0000000000000000000000000000000000000000..d150555430886d64768af5b4a808701be288d01e --- /dev/null +++ b/src/f5_tts/model/backbones/mmdit.py @@ 
-0,0 +1,189 @@ +""" +ein notation: +b - batch +n - sequence +nt - text sequence +nw - raw wave length +d - dimension +""" + +from __future__ import annotations + +import torch +from torch import nn + +from x_transformers.x_transformers import RotaryEmbedding + +from f5_tts.model.modules import ( + TimestepEmbedding, + ConvPositionEmbedding, + MMDiTBlock, + AdaLayerNorm_Final, + precompute_freqs_cis, + get_pos_embed_indices, +) + + +# text embedding + + +class TextEmbedding(nn.Module): + def __init__(self, out_dim, text_num_embeds, mask_padding=True): + super().__init__() + self.text_embed = nn.Embedding(text_num_embeds + 1, out_dim) # will use 0 as filler token + + self.mask_padding = mask_padding # mask filler and batch padding tokens or not + + self.precompute_max_pos = 1024 + self.register_buffer("freqs_cis", precompute_freqs_cis(out_dim, self.precompute_max_pos), persistent=False) + + def forward(self, text: int["b nt"], drop_text=False) -> int["b nt d"]: # noqa: F722 + text = text + 1 # use 0 as filler token. preprocess of batch pad -1, see list_str_to_idx() + if self.mask_padding: + text_mask = text == 0 + + if drop_text: # cfg for text + text = torch.zeros_like(text) + + text = self.text_embed(text) # b nt -> b nt d + + # sinus pos emb + batch_start = torch.zeros((text.shape[0],), dtype=torch.long) + batch_text_len = text.shape[1] + pos_idx = get_pos_embed_indices(batch_start, batch_text_len, max_pos=self.precompute_max_pos) + text_pos_embed = self.freqs_cis[pos_idx] + + text = text + text_pos_embed + + if self.mask_padding: + text = text.masked_fill(text_mask.unsqueeze(-1).expand(-1, -1, text.size(-1)), 0.0) + + return text + + +# noised input & masked cond audio embedding + + +class AudioEmbedding(nn.Module): + def __init__(self, in_dim, out_dim): + super().__init__() + self.linear = nn.Linear(2 * in_dim, out_dim) + self.conv_pos_embed = ConvPositionEmbedding(out_dim) + + def forward(self, x: float["b n d"], cond: float["b n d"], drop_audio_cond=False): # noqa: F722 + if drop_audio_cond: + cond = torch.zeros_like(cond) + x = torch.cat((x, cond), dim=-1) + x = self.linear(x) + x = self.conv_pos_embed(x) + x + return x + + +# Transformer backbone using MM-DiT blocks + + +class MMDiT(nn.Module): + def __init__( + self, + *, + dim, + depth=8, + heads=8, + dim_head=64, + dropout=0.1, + ff_mult=4, + mel_dim=100, + text_num_embeds=256, + text_mask_padding=True, + qk_norm=None, + ): + super().__init__() + + self.time_embed = TimestepEmbedding(dim) + self.text_embed = TextEmbedding(dim, text_num_embeds, mask_padding=text_mask_padding) + self.text_cond, self.text_uncond = None, None # text cache + self.audio_embed = AudioEmbedding(mel_dim, dim) + + self.rotary_embed = RotaryEmbedding(dim_head) + + self.dim = dim + self.depth = depth + + self.transformer_blocks = nn.ModuleList( + [ + MMDiTBlock( + dim=dim, + heads=heads, + dim_head=dim_head, + dropout=dropout, + ff_mult=ff_mult, + context_pre_only=i == depth - 1, + qk_norm=qk_norm, + ) + for i in range(depth) + ] + ) + self.norm_out = AdaLayerNorm_Final(dim) # final modulation + self.proj_out = nn.Linear(dim, mel_dim) + + self.initialize_weights() + + def initialize_weights(self): + # Zero-out AdaLN layers in MMDiT blocks: + for block in self.transformer_blocks: + nn.init.constant_(block.attn_norm_x.linear.weight, 0) + nn.init.constant_(block.attn_norm_x.linear.bias, 0) + nn.init.constant_(block.attn_norm_c.linear.weight, 0) + nn.init.constant_(block.attn_norm_c.linear.bias, 0) + + # Zero-out output layers: + 
nn.init.constant_(self.norm_out.linear.weight, 0) + nn.init.constant_(self.norm_out.linear.bias, 0) + nn.init.constant_(self.proj_out.weight, 0) + nn.init.constant_(self.proj_out.bias, 0) + + def clear_cache(self): + self.text_cond, self.text_uncond = None, None + + def forward( + self, + x: float["b n d"], # nosied input audio # noqa: F722 + cond: float["b n d"], # masked cond audio # noqa: F722 + text: int["b nt"], # text # noqa: F722 + time: float["b"] | float[""], # time step # noqa: F821 F722 + drop_audio_cond, # cfg for cond audio + drop_text, # cfg for text + mask: bool["b n"] | None = None, # noqa: F722 + cache=False, + ): + batch = x.shape[0] + if time.ndim == 0: + time = time.repeat(batch) + + # t: conditioning (time), c: context (text + masked cond audio), x: noised input audio + t = self.time_embed(time) + if cache: + if drop_text: + if self.text_uncond is None: + self.text_uncond = self.text_embed(text, drop_text=True) + c = self.text_uncond + else: + if self.text_cond is None: + self.text_cond = self.text_embed(text, drop_text=False) + c = self.text_cond + else: + c = self.text_embed(text, drop_text=drop_text) + x = self.audio_embed(x, cond, drop_audio_cond=drop_audio_cond) + + seq_len = x.shape[1] + text_len = text.shape[1] + rope_audio = self.rotary_embed.forward_from_seq_len(seq_len) + rope_text = self.rotary_embed.forward_from_seq_len(text_len) + + for block in self.transformer_blocks: + c, x = block(x, c, t, mask=mask, rope=rope_audio, c_rope=rope_text) + + x = self.norm_out(x, t) + output = self.proj_out(x) + + return output diff --git a/src/f5_tts/model/backbones/unett.py b/src/f5_tts/model/backbones/unett.py new file mode 100644 index 0000000000000000000000000000000000000000..11e4d026544089c6664f3f4a5f00433d48e69ceb --- /dev/null +++ b/src/f5_tts/model/backbones/unett.py @@ -0,0 +1,250 @@ +""" +ein notation: +b - batch +n - sequence +nt - text sequence +nw - raw wave length +d - dimension +""" + +from __future__ import annotations +from typing import Literal + +import torch +from torch import nn +import torch.nn.functional as F + +from x_transformers import RMSNorm +from x_transformers.x_transformers import RotaryEmbedding + +from f5_tts.model.modules import ( + TimestepEmbedding, + ConvNeXtV2Block, + ConvPositionEmbedding, + Attention, + AttnProcessor, + FeedForward, + precompute_freqs_cis, + get_pos_embed_indices, +) + + +# Text embedding + + +class TextEmbedding(nn.Module): + def __init__(self, text_num_embeds, text_dim, mask_padding=True, conv_layers=0, conv_mult=2): + super().__init__() + self.text_embed = nn.Embedding(text_num_embeds + 1, text_dim) # use 0 as filler token + + self.mask_padding = mask_padding # mask filler and batch padding tokens or not + + if conv_layers > 0: + self.extra_modeling = True + self.precompute_max_pos = 4096 # ~44s of 24khz audio + self.register_buffer("freqs_cis", precompute_freqs_cis(text_dim, self.precompute_max_pos), persistent=False) + self.text_blocks = nn.Sequential( + *[ConvNeXtV2Block(text_dim, text_dim * conv_mult) for _ in range(conv_layers)] + ) + else: + self.extra_modeling = False + + def forward(self, text: int["b nt"], seq_len, drop_text=False): # noqa: F722 + text = text + 1 # use 0 as filler token. 
preprocess of batch pad -1, see list_str_to_idx() + text = text[:, :seq_len] # curtail if character tokens are more than the mel spec tokens + batch, text_len = text.shape[0], text.shape[1] + text = F.pad(text, (0, seq_len - text_len), value=0) + if self.mask_padding: + text_mask = text == 0 + + if drop_text: # cfg for text + text = torch.zeros_like(text) + + text = self.text_embed(text) # b n -> b n d + + # possible extra modeling + if self.extra_modeling: + # sinus pos emb + batch_start = torch.zeros((batch,), dtype=torch.long) + pos_idx = get_pos_embed_indices(batch_start, seq_len, max_pos=self.precompute_max_pos) + text_pos_embed = self.freqs_cis[pos_idx] + text = text + text_pos_embed + + # convnextv2 blocks + if self.mask_padding: + text = text.masked_fill(text_mask.unsqueeze(-1).expand(-1, -1, text.size(-1)), 0.0) + for block in self.text_blocks: + text = block(text) + text = text.masked_fill(text_mask.unsqueeze(-1).expand(-1, -1, text.size(-1)), 0.0) + else: + text = self.text_blocks(text) + + return text + + +# noised input audio and context mixing embedding + + +class InputEmbedding(nn.Module): + def __init__(self, mel_dim, text_dim, out_dim): + super().__init__() + self.proj = nn.Linear(mel_dim * 2 + text_dim, out_dim) + self.conv_pos_embed = ConvPositionEmbedding(dim=out_dim) + + def forward(self, x: float["b n d"], cond: float["b n d"], text_embed: float["b n d"], drop_audio_cond=False): # noqa: F722 + if drop_audio_cond: # cfg for cond audio + cond = torch.zeros_like(cond) + + x = self.proj(torch.cat((x, cond, text_embed), dim=-1)) + x = self.conv_pos_embed(x) + x + return x + + +# Flat UNet Transformer backbone + + +class UNetT(nn.Module): + def __init__( + self, + *, + dim, + depth=8, + heads=8, + dim_head=64, + dropout=0.1, + ff_mult=4, + mel_dim=100, + text_num_embeds=256, + text_dim=None, + text_mask_padding=True, + qk_norm=None, + conv_layers=0, + pe_attn_head=None, + skip_connect_type: Literal["add", "concat", "none"] = "concat", + ): + super().__init__() + assert depth % 2 == 0, "UNet-Transformer's depth should be even." 
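+        # note on the even-depth assert above: forward() treats the first depth//2 blocks as the
+        # "down" half and the last depth//2 as the "up" half; each early block pushes its output
+        # onto a stack and the mirrored later block pops it, merging via concat + Linear
+        # ("concat") or plain addition ("add"), so the two halves must mirror each other.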
+ + self.time_embed = TimestepEmbedding(dim) + if text_dim is None: + text_dim = mel_dim + self.text_embed = TextEmbedding( + text_num_embeds, text_dim, mask_padding=text_mask_padding, conv_layers=conv_layers + ) + self.text_cond, self.text_uncond = None, None # text cache + self.input_embed = InputEmbedding(mel_dim, text_dim, dim) + + self.rotary_embed = RotaryEmbedding(dim_head) + + # transformer layers & skip connections + + self.dim = dim + self.skip_connect_type = skip_connect_type + needs_skip_proj = skip_connect_type == "concat" + + self.depth = depth + self.layers = nn.ModuleList([]) + + for idx in range(depth): + is_later_half = idx >= (depth // 2) + + attn_norm = RMSNorm(dim) + attn = Attention( + processor=AttnProcessor(pe_attn_head=pe_attn_head), + dim=dim, + heads=heads, + dim_head=dim_head, + dropout=dropout, + qk_norm=qk_norm, + ) + + ff_norm = RMSNorm(dim) + ff = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh") + + skip_proj = nn.Linear(dim * 2, dim, bias=False) if needs_skip_proj and is_later_half else None + + self.layers.append( + nn.ModuleList( + [ + skip_proj, + attn_norm, + attn, + ff_norm, + ff, + ] + ) + ) + + self.norm_out = RMSNorm(dim) + self.proj_out = nn.Linear(dim, mel_dim) + + def clear_cache(self): + self.text_cond, self.text_uncond = None, None + + def forward( + self, + x: float["b n d"], # nosied input audio # noqa: F722 + cond: float["b n d"], # masked cond audio # noqa: F722 + text: int["b nt"], # text # noqa: F722 + time: float["b"] | float[""], # time step # noqa: F821 F722 + drop_audio_cond, # cfg for cond audio + drop_text, # cfg for text + mask: bool["b n"] | None = None, # noqa: F722 + cache=False, + ): + batch, seq_len = x.shape[0], x.shape[1] + if time.ndim == 0: + time = time.repeat(batch) + + # t: conditioning time, c: context (text + masked cond audio), x: noised input audio + t = self.time_embed(time) + if cache: + if drop_text: + if self.text_uncond is None: + self.text_uncond = self.text_embed(text, seq_len, drop_text=True) + text_embed = self.text_uncond + else: + if self.text_cond is None: + self.text_cond = self.text_embed(text, seq_len, drop_text=False) + text_embed = self.text_cond + else: + text_embed = self.text_embed(text, seq_len, drop_text=drop_text) + x = self.input_embed(x, cond, text_embed, drop_audio_cond=drop_audio_cond) + + # postfix time t to input x, [b n d] -> [b n+1 d] + x = torch.cat([t.unsqueeze(1), x], dim=1) # pack t to x + if mask is not None: + mask = F.pad(mask, (1, 0), value=1) + + rope = self.rotary_embed.forward_from_seq_len(seq_len + 1) + + # flat unet transformer + skip_connect_type = self.skip_connect_type + skips = [] + for idx, (maybe_skip_proj, attn_norm, attn, ff_norm, ff) in enumerate(self.layers): + layer = idx + 1 + + # skip connection logic + is_first_half = layer <= (self.depth // 2) + is_later_half = not is_first_half + + if is_first_half: + skips.append(x) + + if is_later_half: + skip = skips.pop() + if skip_connect_type == "concat": + x = torch.cat((x, skip), dim=-1) + x = maybe_skip_proj(x) + elif skip_connect_type == "add": + x = x + skip + + # attention and feedforward blocks + x = attn(attn_norm(x), rope=rope, mask=mask) + x + x = ff(ff_norm(x)) + x + + assert len(skips) == 0 + + x = self.norm_out(x)[:, 1:, :] # unpack t from x + + return self.proj_out(x) diff --git a/src/f5_tts/model/cfm.py b/src/f5_tts/model/cfm.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4b67f846e2f8992e19444ac8275b905e3a50ae --- /dev/null +++ b/src/f5_tts/model/cfm.py 
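A note before the cfm.py hunk below: `CFM.forward` trains with plain conditional flow matching on mel frames. The sketch that follows is illustrative only, with toy tensor shapes and a random placeholder where the real code calls the DiT/UNetT transformer; it shows the interpolation, flow target, and random-span masking pattern that the file implements.

```python
import torch
import torch.nn.functional as F

# Toy shapes: batch 2, 64 mel frames, 100 mel channels (the n_mel_channels used in this repo).
x1 = torch.randn(2, 64, 100)      # ground-truth mel ("x1" in CFM.forward)
x0 = torch.randn_like(x1)         # Gaussian noise ("x0")
t = torch.rand(2)[:, None, None]  # one random time step per sample

phi_t = (1 - t) * x0 + t * x1     # noisy interpolant fed to the transformer
flow = x1 - x0                    # regression target

# Stand-in for mask_from_frac_lengths(): the real code masks a contiguous random
# 70-100% span per sample and scores the loss only inside that span.
rand_span_mask = torch.zeros(2, 64, dtype=torch.bool)
rand_span_mask[:, 16:48] = True

# Placeholder prediction; cfm.py calls transformer(x=phi_t, cond=..., text=..., time=t) here.
pred = torch.randn_like(flow)

loss = F.mse_loss(pred, flow, reduction="none")[rand_span_mask].mean()
print(loss.item())
```

In the actual module, `cond` is the same mel with the masked span zeroed out, and audio/text conditioning is randomly dropped (audio_drop_prob / cond_drop_prob) so the model can be used with classifier-free guidance at sampling time.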
@@ -0,0 +1,283 @@ +""" +ein notation: +b - batch +n - sequence +nt - text sequence +nw - raw wave length +d - dimension +""" + +from __future__ import annotations + +from random import random +from typing import Callable + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn.utils.rnn import pad_sequence +from torchdiffeq import odeint + +from f5_tts.model.modules import MelSpec +from f5_tts.model.utils import ( + default, + exists, + lens_to_mask, + list_str_to_idx, + list_str_to_tensor, + mask_from_frac_lengths, +) + + +class CFM(nn.Module): + def __init__( + self, + transformer: nn.Module, + sigma=0.0, + odeint_kwargs: dict = dict( + # atol = 1e-5, + # rtol = 1e-5, + method="euler" # 'midpoint' + ), + audio_drop_prob=0.3, + cond_drop_prob=0.2, + num_channels=None, + mel_spec_module: nn.Module | None = None, + mel_spec_kwargs: dict = dict(), + frac_lengths_mask: tuple[float, float] = (0.7, 1.0), + vocab_char_map: dict[str:int] | None = None, + ): + super().__init__() + + self.frac_lengths_mask = frac_lengths_mask + + # mel spec + self.mel_spec = default(mel_spec_module, MelSpec(**mel_spec_kwargs)) + num_channels = default(num_channels, self.mel_spec.n_mel_channels) + self.num_channels = num_channels + + # classifier-free guidance + self.audio_drop_prob = audio_drop_prob + self.cond_drop_prob = cond_drop_prob + + # transformer + self.transformer = transformer + dim = transformer.dim + self.dim = dim + + # conditional flow related + self.sigma = sigma + + # sampling related + self.odeint_kwargs = odeint_kwargs + + # vocab map for tokenization + self.vocab_char_map = vocab_char_map + + @property + def device(self): + return next(self.parameters()).device + + @torch.no_grad() + def sample( + self, + cond: float["b n d"] | float["b nw"], # noqa: F722 + text: int["b nt"] | list[str], # noqa: F722 + duration: int | int["b"], # noqa: F821 + *, + lens: int["b"] | None = None, # noqa: F821 + steps=32, + cfg_strength=1.0, + sway_sampling_coef=None, + seed: int | None = None, + max_duration=4096, + vocoder: Callable[[float["b d n"]], float["b nw"]] | None = None, # noqa: F722 + no_ref_audio=False, + duplicate_test=False, + t_inter=0.1, + edit_mask=None, + ): + self.eval() + # raw wave + + if cond.ndim == 2: + cond = self.mel_spec(cond) + cond = cond.permute(0, 2, 1) + assert cond.shape[-1] == self.num_channels + + cond = cond.to(next(self.parameters()).dtype) + + batch, cond_seq_len, device = *cond.shape[:2], cond.device + if not exists(lens): + lens = torch.full((batch,), cond_seq_len, device=device, dtype=torch.long) + + # text + + if isinstance(text, list): + if exists(self.vocab_char_map): + text = list_str_to_idx(text, self.vocab_char_map).to(device) + else: + text = list_str_to_tensor(text).to(device) + assert text.shape[0] == batch + + # duration + + cond_mask = lens_to_mask(lens) + if edit_mask is not None: + cond_mask = cond_mask & edit_mask + + if isinstance(duration, int): + duration = torch.full((batch,), duration, device=device, dtype=torch.long) + + duration = torch.maximum( + torch.maximum((text != -1).sum(dim=-1), lens) + 1, duration + ) # duration at least text/audio prompt length plus one token, so something is generated + duration = duration.clamp(max=max_duration) + max_duration = duration.amax() + + # duplicate test corner for inner time step oberservation + if duplicate_test: + test_cond = F.pad(cond, (0, 0, cond_seq_len, max_duration - 2 * cond_seq_len), value=0.0) + + cond = F.pad(cond, (0, 0, 0, max_duration - cond_seq_len), value=0.0) + if 
no_ref_audio: + cond = torch.zeros_like(cond) + + cond_mask = F.pad(cond_mask, (0, max_duration - cond_mask.shape[-1]), value=False) + cond_mask = cond_mask.unsqueeze(-1) + step_cond = torch.where( + cond_mask, cond, torch.zeros_like(cond) + ) # allow direct control (cut cond audio) with lens passed in + + if batch > 1: + mask = lens_to_mask(duration) + else: # save memory and speed up, as single inference need no mask currently + mask = None + + # neural ode + + def fn(t, x): + # at each step, conditioning is fixed + # step_cond = torch.where(cond_mask, cond, torch.zeros_like(cond)) + + # predict flow + pred = self.transformer( + x=x, cond=step_cond, text=text, time=t, mask=mask, drop_audio_cond=False, drop_text=False, cache=True + ) + if cfg_strength < 1e-5: + return pred + + null_pred = self.transformer( + x=x, cond=step_cond, text=text, time=t, mask=mask, drop_audio_cond=True, drop_text=True, cache=True + ) + return pred + (pred - null_pred) * cfg_strength + + # noise input + # to make sure batch inference result is same with different batch size, and for sure single inference + # still some difference maybe due to convolutional layers + y0 = [] + for dur in duration: + if exists(seed): + torch.manual_seed(seed) + y0.append(torch.randn(dur, self.num_channels, device=self.device, dtype=step_cond.dtype)) + y0 = pad_sequence(y0, padding_value=0, batch_first=True) + + t_start = 0 + + # duplicate test corner for inner time step oberservation + if duplicate_test: + t_start = t_inter + y0 = (1 - t_start) * y0 + t_start * test_cond + steps = int(steps * (1 - t_start)) + + t = torch.linspace(t_start, 1, steps + 1, device=self.device, dtype=step_cond.dtype) + if sway_sampling_coef is not None: + t = t + sway_sampling_coef * (torch.cos(torch.pi / 2 * t) - 1 + t) + + trajectory = odeint(fn, y0, t, **self.odeint_kwargs) + self.transformer.clear_cache() + + sampled = trajectory[-1] + out = sampled + out = torch.where(cond_mask, cond, out) + + if exists(vocoder): + out = out.permute(0, 2, 1) + out = vocoder(out) + + return out, trajectory + + def forward( + self, + inp: float["b n d"] | float["b nw"], # mel or raw wave # noqa: F722 + text: int["b nt"] | list[str], # noqa: F722 + *, + lens: int["b"] | None = None, # noqa: F821 + noise_scheduler: str | None = None, + ): + # handle raw wave + if inp.ndim == 2: + inp = self.mel_spec(inp) + inp = inp.permute(0, 2, 1) + assert inp.shape[-1] == self.num_channels + + batch, seq_len, dtype, device, _σ1 = *inp.shape[:2], inp.dtype, self.device, self.sigma + + # handle text as string + if isinstance(text, list): + if exists(self.vocab_char_map): + text = list_str_to_idx(text, self.vocab_char_map).to(device) + else: + text = list_str_to_tensor(text).to(device) + assert text.shape[0] == batch + + # lens and mask + if not exists(lens): + lens = torch.full((batch,), seq_len, device=device) + + mask = lens_to_mask(lens, length=seq_len) # useless here, as collate_fn will pad to max length in batch + + # get a random span to mask out for training conditionally + frac_lengths = torch.zeros((batch,), device=self.device).float().uniform_(*self.frac_lengths_mask) + rand_span_mask = mask_from_frac_lengths(lens, frac_lengths) + + if exists(mask): + rand_span_mask &= mask + + # mel is x1 + x1 = inp + + # x0 is gaussian noise + x0 = torch.randn_like(x1) + + # time step + time = torch.rand((batch,), dtype=dtype, device=self.device) + # TODO. 
noise_scheduler + + # sample xt (φ_t(x) in the paper) + t = time.unsqueeze(-1).unsqueeze(-1) + φ = (1 - t) * x0 + t * x1 + flow = x1 - x0 + + # only predict what is within the random mask span for infilling + cond = torch.where(rand_span_mask[..., None], torch.zeros_like(x1), x1) + + # transformer and cfg training with a drop rate + drop_audio_cond = random() < self.audio_drop_prob # p_drop in voicebox paper + if random() < self.cond_drop_prob: # p_uncond in voicebox paper + drop_audio_cond = True + drop_text = True + else: + drop_text = False + + # if want rigourously mask out padding, record in collate_fn in dataset.py, and pass in here + # adding mask will use more memory, thus also need to adjust batchsampler with scaled down threshold for long sequences + pred = self.transformer( + x=φ, cond=cond, text=text, time=time, drop_audio_cond=drop_audio_cond, drop_text=drop_text + ) + + # flow matching loss + loss = F.mse_loss(pred, flow, reduction="none") + loss = loss[rand_span_mask] + + return loss.mean(), cond, pred diff --git a/src/f5_tts/model/dataset.py b/src/f5_tts/model/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b0622aa0c131804ee983f575fa3c07c2d38a68f8 --- /dev/null +++ b/src/f5_tts/model/dataset.py @@ -0,0 +1,330 @@ +import json +from importlib.resources import files + +import torch +import torch.nn.functional as F +import torchaudio +from datasets import Dataset as Dataset_ +from datasets import load_from_disk +from torch import nn +from torch.utils.data import Dataset, Sampler +from tqdm import tqdm + +from f5_tts.model.modules import MelSpec +from f5_tts.model.utils import default + + +class HFDataset(Dataset): + def __init__( + self, + hf_dataset: Dataset, + target_sample_rate=24_000, + n_mel_channels=100, + hop_length=256, + n_fft=1024, + win_length=1024, + mel_spec_type="vocos", + ): + self.data = hf_dataset + self.target_sample_rate = target_sample_rate + self.hop_length = hop_length + + self.mel_spectrogram = MelSpec( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + n_mel_channels=n_mel_channels, + target_sample_rate=target_sample_rate, + mel_spec_type=mel_spec_type, + ) + + def get_frame_len(self, index): + row = self.data[index] + audio = row["audio"]["array"] + sample_rate = row["audio"]["sampling_rate"] + return audio.shape[-1] / sample_rate * self.target_sample_rate / self.hop_length + + def __len__(self): + return len(self.data) + + def __getitem__(self, index): + row = self.data[index] + audio = row["audio"]["array"] + + # logger.info(f"Audio shape: {audio.shape}") + + sample_rate = row["audio"]["sampling_rate"] + duration = audio.shape[-1] / sample_rate + + if duration > 30 or duration < 0.3: + return self.__getitem__((index + 1) % len(self.data)) + + audio_tensor = torch.from_numpy(audio).float() + + if sample_rate != self.target_sample_rate: + resampler = torchaudio.transforms.Resample(sample_rate, self.target_sample_rate) + audio_tensor = resampler(audio_tensor) + + audio_tensor = audio_tensor.unsqueeze(0) # 't -> 1 t') + + mel_spec = self.mel_spectrogram(audio_tensor) + + mel_spec = mel_spec.squeeze(0) # '1 d t -> d t' + + text = row["text"] + + return dict( + mel_spec=mel_spec, + text=text, + ) + + +class CustomDataset(Dataset): + def __init__( + self, + custom_dataset: Dataset, + durations=None, + target_sample_rate=24_000, + hop_length=256, + n_mel_channels=100, + n_fft=1024, + win_length=1024, + mel_spec_type="vocos", + preprocessed_mel=False, + mel_spec_module: nn.Module | None = None, + ): + self.data = 
custom_dataset + self.durations = durations + self.target_sample_rate = target_sample_rate + self.hop_length = hop_length + self.n_fft = n_fft + self.win_length = win_length + self.mel_spec_type = mel_spec_type + self.preprocessed_mel = preprocessed_mel + + if not preprocessed_mel: + self.mel_spectrogram = default( + mel_spec_module, + MelSpec( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + n_mel_channels=n_mel_channels, + target_sample_rate=target_sample_rate, + mel_spec_type=mel_spec_type, + ), + ) + + def get_frame_len(self, index): + if ( + self.durations is not None + ): # Please make sure the separately provided durations are correct, otherwise 99.99% OOM + return self.durations[index] * self.target_sample_rate / self.hop_length + return self.data[index]["duration"] * self.target_sample_rate / self.hop_length + + def __len__(self): + return len(self.data) + + def __getitem__(self, index): + while True: + row = self.data[index] + audio_path = row["audio_path"] + text = row["text"] + duration = row["duration"] + + # filter by given length + if 0.3 <= duration <= 30: + break # valid + + index = (index + 1) % len(self.data) + + if self.preprocessed_mel: + mel_spec = torch.tensor(row["mel_spec"]) + else: + audio, source_sample_rate = torchaudio.load(audio_path) + + # make sure mono input + if audio.shape[0] > 1: + audio = torch.mean(audio, dim=0, keepdim=True) + + # resample if necessary + if source_sample_rate != self.target_sample_rate: + resampler = torchaudio.transforms.Resample(source_sample_rate, self.target_sample_rate) + audio = resampler(audio) + + # to mel spectrogram + mel_spec = self.mel_spectrogram(audio) + mel_spec = mel_spec.squeeze(0) # '1 d t -> d t' + + return { + "mel_spec": mel_spec, + "text": text, + } + + +# Dynamic Batch Sampler +class DynamicBatchSampler(Sampler[list[int]]): + """Extension of Sampler that will do the following: + 1. Change the batch size (essentially number of sequences) + in a batch to ensure that the total number of frames are less + than a certain threshold. + 2. Make sure the padding efficiency in the batch is high. + 3. Shuffle batches each epoch while maintaining reproducibility. + """ + + def __init__( + self, sampler: Sampler[int], frames_threshold: int, max_samples=0, random_seed=None, drop_residual: bool = False + ): + self.sampler = sampler + self.frames_threshold = frames_threshold + self.max_samples = max_samples + self.random_seed = random_seed + self.epoch = 0 + + indices, batches = [], [] + data_source = self.sampler.data_source + + for idx in tqdm( + self.sampler, desc="Sorting with sampler... 
if slow, check whether dataset is provided with duration" + ): + indices.append((idx, data_source.get_frame_len(idx))) + indices.sort(key=lambda elem: elem[1]) + + batch = [] + batch_frames = 0 + for idx, frame_len in tqdm( + indices, desc=f"Creating dynamic batches with {frames_threshold} audio frames per gpu" + ): + if batch_frames + frame_len <= self.frames_threshold and (max_samples == 0 or len(batch) < max_samples): + batch.append(idx) + batch_frames += frame_len + else: + if len(batch) > 0: + batches.append(batch) + if frame_len <= self.frames_threshold: + batch = [idx] + batch_frames = frame_len + else: + batch = [] + batch_frames = 0 + + if not drop_residual and len(batch) > 0: + batches.append(batch) + + del indices + self.batches = batches + + # Ensure even batches with accelerate BatchSamplerShard cls under frame_per_batch setting + self.drop_last = True + + def set_epoch(self, epoch: int) -> None: + """Sets the epoch for this sampler.""" + self.epoch = epoch + + def __iter__(self): + # Use both random_seed and epoch for deterministic but different shuffling per epoch + if self.random_seed is not None: + g = torch.Generator() + g.manual_seed(self.random_seed + self.epoch) + # Use PyTorch's random permutation for better reproducibility across PyTorch versions + indices = torch.randperm(len(self.batches), generator=g).tolist() + batches = [self.batches[i] for i in indices] + else: + batches = self.batches + return iter(batches) + + def __len__(self): + return len(self.batches) + + +# Load dataset + + +def load_dataset( + dataset_name: str, + tokenizer: str = "pinyin", + dataset_type: str = "CustomDataset", + audio_type: str = "raw", + mel_spec_module: nn.Module | None = None, + mel_spec_kwargs: dict = dict(), +) -> CustomDataset | HFDataset: + """ + dataset_type - "CustomDataset" if you want to use tokenizer name and default data path to load for train_dataset + - "CustomDatasetPath" if you just want to pass the full path to a preprocessed dataset without relying on tokenizer + """ + + print("Loading dataset ...") + + if dataset_type == "CustomDataset": + rel_data_path = str(files("f5_tts").joinpath(f"../../data/{dataset_name}")) + if audio_type == "raw": + try: + train_dataset = load_from_disk(f"{rel_data_path}/raw") + except: # noqa: E722 + train_dataset = Dataset_.from_file(f"{rel_data_path}/raw.arrow") + preprocessed_mel = False + elif audio_type == "mel": + train_dataset = Dataset_.from_file(f"{rel_data_path}/mel.arrow") + preprocessed_mel = True + with open(f"{rel_data_path}/duration.json", "r", encoding="utf-8") as f: + data_dict = json.load(f) + durations = data_dict["duration"] + train_dataset = CustomDataset( + train_dataset, + durations=durations, + preprocessed_mel=preprocessed_mel, + mel_spec_module=mel_spec_module, + **mel_spec_kwargs, + ) + + elif dataset_type == "CustomDatasetPath": + try: + train_dataset = load_from_disk(f"{dataset_name}/raw") + except: # noqa: E722 + train_dataset = Dataset_.from_file(f"{dataset_name}/raw.arrow") + + with open(f"{dataset_name}/duration.json", "r", encoding="utf-8") as f: + data_dict = json.load(f) + durations = data_dict["duration"] + train_dataset = CustomDataset( + train_dataset, durations=durations, preprocessed_mel=preprocessed_mel, **mel_spec_kwargs + ) + + elif dataset_type == "HFDataset": + print( + "Should manually modify the path of huggingface dataset to your need.\n" + + "May also the corresponding script cuz different dataset may have different format." 
+ ) + pre, post = dataset_name.split("_") + train_dataset = HFDataset( + load_dataset(f"{pre}/{pre}", split=f"train.{post}", cache_dir=str(files("f5_tts").joinpath("../../data"))), + ) + + return train_dataset + + +# collation + + +def collate_fn(batch): + mel_specs = [item["mel_spec"].squeeze(0) for item in batch] + mel_lengths = torch.LongTensor([spec.shape[-1] for spec in mel_specs]) + max_mel_length = mel_lengths.amax() + + padded_mel_specs = [] + for spec in mel_specs: # TODO. maybe records mask for attention here + padding = (0, max_mel_length - spec.size(-1)) + padded_spec = F.pad(spec, padding, value=0) + padded_mel_specs.append(padded_spec) + + mel_specs = torch.stack(padded_mel_specs) + + text = [item["text"] for item in batch] + text_lengths = torch.LongTensor([len(item) for item in text]) + + return dict( + mel=mel_specs, + mel_lengths=mel_lengths, + text=text, + text_lengths=text_lengths, + ) diff --git a/src/f5_tts/model/modules.py b/src/f5_tts/model/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..8e5c3c27a29882a52156f38e0ecc4553f257ac8f --- /dev/null +++ b/src/f5_tts/model/modules.py @@ -0,0 +1,731 @@ +""" +ein notation: +b - batch +n - sequence +nt - text sequence +nw - raw wave length +d - dimension +""" + +from __future__ import annotations + +import math +from typing import Optional + +import torch +import torch.nn.functional as F +import torchaudio +from librosa.filters import mel as librosa_mel_fn +from torch import nn +from x_transformers.x_transformers import apply_rotary_pos_emb + + +# raw wav to mel spec + + +mel_basis_cache = {} +hann_window_cache = {} + + +def get_bigvgan_mel_spectrogram( + waveform, + n_fft=1024, + n_mel_channels=100, + target_sample_rate=24000, + hop_length=256, + win_length=1024, + fmin=0, + fmax=None, + center=False, +): # Copy from https://github.com/NVIDIA/BigVGAN/tree/main + device = waveform.device + key = f"{n_fft}_{n_mel_channels}_{target_sample_rate}_{hop_length}_{win_length}_{fmin}_{fmax}_{device}" + + if key not in mel_basis_cache: + mel = librosa_mel_fn(sr=target_sample_rate, n_fft=n_fft, n_mels=n_mel_channels, fmin=fmin, fmax=fmax) + mel_basis_cache[key] = torch.from_numpy(mel).float().to(device) # TODO: why they need .float()? 
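+ # cache the matching Hann window under the same per-config/per-device key, so repeated calls reuse both the filterbank and the window instead of rebuilding them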
+ hann_window_cache[key] = torch.hann_window(win_length).to(device) + + mel_basis = mel_basis_cache[key] + hann_window = hann_window_cache[key] + + padding = (n_fft - hop_length) // 2 + waveform = torch.nn.functional.pad(waveform.unsqueeze(1), (padding, padding), mode="reflect").squeeze(1) + + spec = torch.stft( + waveform, + n_fft, + hop_length=hop_length, + win_length=win_length, + window=hann_window, + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9) + + mel_spec = torch.matmul(mel_basis, spec) + mel_spec = torch.log(torch.clamp(mel_spec, min=1e-5)) + + return mel_spec + + +def get_vocos_mel_spectrogram( + waveform, + n_fft=1024, + n_mel_channels=100, + target_sample_rate=24000, + hop_length=256, + win_length=1024, +): + mel_stft = torchaudio.transforms.MelSpectrogram( + sample_rate=target_sample_rate, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + n_mels=n_mel_channels, + power=1, + center=True, + normalized=False, + norm=None, + ).to(waveform.device) + if len(waveform.shape) == 3: + waveform = waveform.squeeze(1) # 'b 1 nw -> b nw' + + assert len(waveform.shape) == 2 + + mel = mel_stft(waveform) + mel = mel.clamp(min=1e-5).log() + return mel + + +class MelSpec(nn.Module): + def __init__( + self, + n_fft=1024, + hop_length=256, + win_length=1024, + n_mel_channels=100, + target_sample_rate=24_000, + mel_spec_type="vocos", + ): + super().__init__() + assert mel_spec_type in ["vocos", "bigvgan"], print("We only support two extract mel backend: vocos or bigvgan") + + self.n_fft = n_fft + self.hop_length = hop_length + self.win_length = win_length + self.n_mel_channels = n_mel_channels + self.target_sample_rate = target_sample_rate + + if mel_spec_type == "vocos": + self.extractor = get_vocos_mel_spectrogram + elif mel_spec_type == "bigvgan": + self.extractor = get_bigvgan_mel_spectrogram + + self.register_buffer("dummy", torch.tensor(0), persistent=False) + + def forward(self, wav): + if self.dummy.device != wav.device: + self.to(wav.device) + + mel = self.extractor( + waveform=wav, + n_fft=self.n_fft, + n_mel_channels=self.n_mel_channels, + target_sample_rate=self.target_sample_rate, + hop_length=self.hop_length, + win_length=self.win_length, + ) + + return mel + + +# sinusoidal position embedding + + +class SinusPositionEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x, scale=1000): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb) + emb = scale * x.unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +# convolutional position embedding + + +class ConvPositionEmbedding(nn.Module): + def __init__(self, dim, kernel_size=31, groups=16): + super().__init__() + assert kernel_size % 2 != 0 + self.conv1d = nn.Sequential( + nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=kernel_size // 2), + nn.Mish(), + nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=kernel_size // 2), + nn.Mish(), + ) + + def forward(self, x: float["b n d"], mask: bool["b n"] | None = None): # noqa: F722 + if mask is not None: + mask = mask[..., None] + x = x.masked_fill(~mask, 0.0) + + x = x.permute(0, 2, 1) + x = self.conv1d(x) + out = x.permute(0, 2, 1) + + if mask is not None: + out = out.masked_fill(~mask, 0.0) + + return out + + +# rotary positional 
embedding related + + +def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0, theta_rescale_factor=1.0): + # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning + # has some connection to NTK literature + # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ + # https://github.com/lucidrains/rotary-embedding-torch/blob/main/rotary_embedding_torch/rotary_embedding_torch.py + theta *= theta_rescale_factor ** (dim / (dim - 2)) + freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim)) + t = torch.arange(end, device=freqs.device) # type: ignore + freqs = torch.outer(t, freqs).float() # type: ignore + freqs_cos = torch.cos(freqs) # real part + freqs_sin = torch.sin(freqs) # imaginary part + return torch.cat([freqs_cos, freqs_sin], dim=-1) + + +def get_pos_embed_indices(start, length, max_pos, scale=1.0): + # length = length if isinstance(length, int) else length.max() + scale = scale * torch.ones_like(start, dtype=torch.float32) # in case scale is a scalar + pos = ( + start.unsqueeze(1) + + (torch.arange(length, device=start.device, dtype=torch.float32).unsqueeze(0) * scale.unsqueeze(1)).long() + ) + # avoid extra long error. + pos = torch.where(pos < max_pos, pos, max_pos - 1) + return pos + + +# Global Response Normalization layer (Instance Normalization ?) + + +class GRN(nn.Module): + def __init__(self, dim): + super().__init__() + self.gamma = nn.Parameter(torch.zeros(1, 1, dim)) + self.beta = nn.Parameter(torch.zeros(1, 1, dim)) + + def forward(self, x): + Gx = torch.norm(x, p=2, dim=1, keepdim=True) + Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6) + return self.gamma * (x * Nx) + self.beta + x + + +# ConvNeXt-V2 Block https://github.com/facebookresearch/ConvNeXt-V2/blob/main/models/convnextv2.py +# ref: https://github.com/bfs18/e2_tts/blob/main/rfwave/modules.py#L108 + + +class ConvNeXtV2Block(nn.Module): + def __init__( + self, + dim: int, + intermediate_dim: int, + dilation: int = 1, + ): + super().__init__() + padding = (dilation * (7 - 1)) // 2 + self.dwconv = nn.Conv1d( + dim, dim, kernel_size=7, padding=padding, groups=dim, dilation=dilation + ) # depthwise conv + self.norm = nn.LayerNorm(dim, eps=1e-6) + self.pwconv1 = nn.Linear(dim, intermediate_dim) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.grn = GRN(intermediate_dim) + self.pwconv2 = nn.Linear(intermediate_dim, dim) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + x = x.transpose(1, 2) # b n d -> b d n + x = self.dwconv(x) + x = x.transpose(1, 2) # b d n -> b n d + x = self.norm(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.grn(x) + x = self.pwconv2(x) + return residual + x + + +# RMSNorm + + +class RMSNorm(nn.Module): + def __init__(self, dim: int, eps: float): + super().__init__() + self.eps = eps + self.weight = nn.Parameter(torch.ones(dim)) + self.native_rms_norm = float(torch.__version__[:3]) >= 2.4 + + def forward(self, x): + if self.native_rms_norm: + if self.weight.dtype in [torch.float16, torch.bfloat16]: + x = x.to(self.weight.dtype) + x = F.rms_norm(x, normalized_shape=(x.shape[-1],), weight=self.weight, eps=self.eps) + else: + variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True) + x = x * torch.rsqrt(variance + self.eps) + if self.weight.dtype in [torch.float16, torch.bfloat16]: + x = x.to(self.weight.dtype) + x = x * self.weight + + return x + + +# AdaLayerNorm +# return with modulated x for 
attn input, and params for later mlp modulation + + +class AdaLayerNorm(nn.Module): + def __init__(self, dim): + super().__init__() + + self.silu = nn.SiLU() + self.linear = nn.Linear(dim, dim * 6) + + self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + + def forward(self, x, emb=None): + emb = self.linear(self.silu(emb)) + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1) + + x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] + return x, gate_msa, shift_mlp, scale_mlp, gate_mlp + + +# AdaLayerNorm for final layer +# return only with modulated x for attn input, cuz no more mlp modulation + + +class AdaLayerNorm_Final(nn.Module): + def __init__(self, dim): + super().__init__() + + self.silu = nn.SiLU() + self.linear = nn.Linear(dim, dim * 2) + + self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + + def forward(self, x, emb): + emb = self.linear(self.silu(emb)) + scale, shift = torch.chunk(emb, 2, dim=1) + + x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] + return x + + +# FeedForward + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, approximate: str = "none"): + super().__init__() + inner_dim = int(dim * mult) + dim_out = dim_out if dim_out is not None else dim + + activation = nn.GELU(approximate=approximate) + project_in = nn.Sequential(nn.Linear(dim, inner_dim), activation) + self.ff = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)) + + def forward(self, x): + return self.ff(x) + + +# Attention with possible joint part +# modified from diffusers/src/diffusers/models/attention_processor.py + + +class Attention(nn.Module): + def __init__( + self, + processor: JointAttnProcessor | AttnProcessor, + dim: int, + heads: int = 8, + dim_head: int = 64, + dropout: float = 0.0, + context_dim: Optional[int] = None, # if not None -> joint attention + context_pre_only: bool = False, + qk_norm: Optional[str] = None, + ): + super().__init__() + + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("Attention equires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + + self.processor = processor + + self.dim = dim + self.heads = heads + self.inner_dim = dim_head * heads + self.dropout = dropout + + self.context_dim = context_dim + self.context_pre_only = context_pre_only + + self.to_q = nn.Linear(dim, self.inner_dim) + self.to_k = nn.Linear(dim, self.inner_dim) + self.to_v = nn.Linear(dim, self.inner_dim) + + if qk_norm is None: + self.q_norm = None + self.k_norm = None + elif qk_norm == "rms_norm": + self.q_norm = RMSNorm(dim_head, eps=1e-6) + self.k_norm = RMSNorm(dim_head, eps=1e-6) + else: + raise ValueError(f"Unimplemented qk_norm: {qk_norm}") + + if self.context_dim is not None: + self.to_q_c = nn.Linear(context_dim, self.inner_dim) + self.to_k_c = nn.Linear(context_dim, self.inner_dim) + self.to_v_c = nn.Linear(context_dim, self.inner_dim) + if qk_norm is None: + self.c_q_norm = None + self.c_k_norm = None + elif qk_norm == "rms_norm": + self.c_q_norm = RMSNorm(dim_head, eps=1e-6) + self.c_k_norm = RMSNorm(dim_head, eps=1e-6) + + self.to_out = nn.ModuleList([]) + self.to_out.append(nn.Linear(self.inner_dim, dim)) + self.to_out.append(nn.Dropout(dropout)) + + if self.context_dim is not None and not self.context_pre_only: + self.to_out_c = nn.Linear(self.inner_dim, context_dim) + + def forward( + self, + x: float["b n d"], # noised input x # noqa: F722 + c: float["b n d"] = None, # context c # 
noqa: F722 + mask: bool["b n"] | None = None, # noqa: F722 + rope=None, # rotary position embedding for x + c_rope=None, # rotary position embedding for c + ) -> torch.Tensor: + if c is not None: + return self.processor(self, x, c=c, mask=mask, rope=rope, c_rope=c_rope) + else: + return self.processor(self, x, mask=mask, rope=rope) + + +# Attention processor + + +class AttnProcessor: + def __init__( + self, + pe_attn_head: int | None = None, # number of attention head to apply rope, None for all + ): + self.pe_attn_head = pe_attn_head + + def __call__( + self, + attn: Attention, + x: float["b n d"], # noised input x # noqa: F722 + mask: bool["b n"] | None = None, # noqa: F722 + rope=None, # rotary position embedding + ) -> torch.FloatTensor: + batch_size = x.shape[0] + + # `sample` projections + query = attn.to_q(x) + key = attn.to_k(x) + value = attn.to_v(x) + + # attention + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # qk norm + if attn.q_norm is not None: + query = attn.q_norm(query) + if attn.k_norm is not None: + key = attn.k_norm(key) + + # apply rotary position embedding + if rope is not None: + freqs, xpos_scale = rope + q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0) + + if self.pe_attn_head is not None: + pn = self.pe_attn_head + query[:, :pn, :, :] = apply_rotary_pos_emb(query[:, :pn, :, :], freqs, q_xpos_scale) + key[:, :pn, :, :] = apply_rotary_pos_emb(key[:, :pn, :, :], freqs, k_xpos_scale) + else: + query = apply_rotary_pos_emb(query, freqs, q_xpos_scale) + key = apply_rotary_pos_emb(key, freqs, k_xpos_scale) + + # mask. e.g. 
inference got a batch with different target durations, mask out the padding + if mask is not None: + attn_mask = mask + attn_mask = attn_mask.unsqueeze(1).unsqueeze(1) # 'b n -> b 1 1 n' + attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2]) + else: + attn_mask = None + + x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False) + x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + x = x.to(query.dtype) + + # linear proj + x = attn.to_out[0](x) + # dropout + x = attn.to_out[1](x) + + if mask is not None: + mask = mask.unsqueeze(-1) + x = x.masked_fill(~mask, 0.0) + + return x + + +# Joint Attention processor for MM-DiT +# modified from diffusers/src/diffusers/models/attention_processor.py + + +class JointAttnProcessor: + def __init__(self): + pass + + def __call__( + self, + attn: Attention, + x: float["b n d"], # noised input x # noqa: F722 + c: float["b nt d"] = None, # context c, here text # noqa: F722 + mask: bool["b n"] | None = None, # noqa: F722 + rope=None, # rotary position embedding for x + c_rope=None, # rotary position embedding for c + ) -> torch.FloatTensor: + residual = x + + batch_size = c.shape[0] + + # `sample` projections + query = attn.to_q(x) + key = attn.to_k(x) + value = attn.to_v(x) + + # `context` projections + c_query = attn.to_q_c(c) + c_key = attn.to_k_c(c) + c_value = attn.to_v_c(c) + + # attention + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + c_query = c_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + c_key = c_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + c_value = c_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # qk norm + if attn.q_norm is not None: + query = attn.q_norm(query) + if attn.k_norm is not None: + key = attn.k_norm(key) + if attn.c_q_norm is not None: + c_query = attn.c_q_norm(c_query) + if attn.c_k_norm is not None: + c_key = attn.c_k_norm(c_key) + + # apply rope for context and noised input independently + if rope is not None: + freqs, xpos_scale = rope + q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0) + query = apply_rotary_pos_emb(query, freqs, q_xpos_scale) + key = apply_rotary_pos_emb(key, freqs, k_xpos_scale) + if c_rope is not None: + freqs, xpos_scale = c_rope + q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0) + c_query = apply_rotary_pos_emb(c_query, freqs, q_xpos_scale) + c_key = apply_rotary_pos_emb(c_key, freqs, k_xpos_scale) + + # joint attention + query = torch.cat([query, c_query], dim=2) + key = torch.cat([key, c_key], dim=2) + value = torch.cat([value, c_value], dim=2) + + # mask. e.g. 
inference got a batch with different target durations, mask out the padding + if mask is not None: + attn_mask = F.pad(mask, (0, c.shape[1]), value=True) # no mask for c (text) + attn_mask = attn_mask.unsqueeze(1).unsqueeze(1) # 'b n -> b 1 1 n' + attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2]) + else: + attn_mask = None + + x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False) + x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + x = x.to(query.dtype) + + # Split the attention outputs. + x, c = ( + x[:, : residual.shape[1]], + x[:, residual.shape[1] :], + ) + + # linear proj + x = attn.to_out[0](x) + # dropout + x = attn.to_out[1](x) + if not attn.context_pre_only: + c = attn.to_out_c(c) + + if mask is not None: + mask = mask.unsqueeze(-1) + x = x.masked_fill(~mask, 0.0) + # c = c.masked_fill(~mask, 0.) # no mask for c (text) + + return x, c + + +# DiT Block + + +class DiTBlock(nn.Module): + def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1, qk_norm=None, pe_attn_head=None): + super().__init__() + + self.attn_norm = AdaLayerNorm(dim) + self.attn = Attention( + processor=AttnProcessor(pe_attn_head=pe_attn_head), + dim=dim, + heads=heads, + dim_head=dim_head, + dropout=dropout, + qk_norm=qk_norm, + ) + + self.ff_norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + self.ff = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh") + + def forward(self, x, t, mask=None, rope=None): # x: noised input, t: time embedding + # pre-norm & modulation for attention input + norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(x, emb=t) + + # attention + attn_output = self.attn(x=norm, mask=mask, rope=rope) + + # process attention output for input x + x = x + gate_msa.unsqueeze(1) * attn_output + + norm = self.ff_norm(x) * (1 + scale_mlp[:, None]) + shift_mlp[:, None] + ff_output = self.ff(norm) + x = x + gate_mlp.unsqueeze(1) * ff_output + + return x + + +# MMDiT Block https://arxiv.org/abs/2403.03206 + + +class MMDiTBlock(nn.Module): + r""" + modified from diffusers/src/diffusers/models/attention.py + + notes. + _c: context related. text, cond, etc. (left part in sd3 fig2.b) + _x: noised input related. 
(right part) + context_pre_only: last layer only do prenorm + modulation cuz no more ffn + """ + + def __init__( + self, dim, heads, dim_head, ff_mult=4, dropout=0.1, context_dim=None, context_pre_only=False, qk_norm=None + ): + super().__init__() + if context_dim is None: + context_dim = dim + self.context_pre_only = context_pre_only + + self.attn_norm_c = AdaLayerNorm_Final(context_dim) if context_pre_only else AdaLayerNorm(context_dim) + self.attn_norm_x = AdaLayerNorm(dim) + self.attn = Attention( + processor=JointAttnProcessor(), + dim=dim, + heads=heads, + dim_head=dim_head, + dropout=dropout, + context_dim=context_dim, + context_pre_only=context_pre_only, + qk_norm=qk_norm, + ) + + if not context_pre_only: + self.ff_norm_c = nn.LayerNorm(context_dim, elementwise_affine=False, eps=1e-6) + self.ff_c = FeedForward(dim=context_dim, mult=ff_mult, dropout=dropout, approximate="tanh") + else: + self.ff_norm_c = None + self.ff_c = None + self.ff_norm_x = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + self.ff_x = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh") + + def forward(self, x, c, t, mask=None, rope=None, c_rope=None): # x: noised input, c: context, t: time embedding + # pre-norm & modulation for attention input + if self.context_pre_only: + norm_c = self.attn_norm_c(c, t) + else: + norm_c, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.attn_norm_c(c, emb=t) + norm_x, x_gate_msa, x_shift_mlp, x_scale_mlp, x_gate_mlp = self.attn_norm_x(x, emb=t) + + # attention + x_attn_output, c_attn_output = self.attn(x=norm_x, c=norm_c, mask=mask, rope=rope, c_rope=c_rope) + + # process attention output for context c + if self.context_pre_only: + c = None + else: # if not last layer + c = c + c_gate_msa.unsqueeze(1) * c_attn_output + + norm_c = self.ff_norm_c(c) * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] + c_ff_output = self.ff_c(norm_c) + c = c + c_gate_mlp.unsqueeze(1) * c_ff_output + + # process attention output for input x + x = x + x_gate_msa.unsqueeze(1) * x_attn_output + + norm_x = self.ff_norm_x(x) * (1 + x_scale_mlp[:, None]) + x_shift_mlp[:, None] + x_ff_output = self.ff_x(norm_x) + x = x + x_gate_mlp.unsqueeze(1) * x_ff_output + + return c, x + + +# time step conditioning embedding + + +class TimestepEmbedding(nn.Module): + def __init__(self, dim, freq_embed_dim=256): + super().__init__() + self.time_embed = SinusPositionEmbedding(freq_embed_dim) + self.time_mlp = nn.Sequential(nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim)) + + def forward(self, timestep: float["b"]): # noqa: F821 + time_hidden = self.time_embed(timestep) + time_hidden = time_hidden.to(timestep.dtype) + time = self.time_mlp(time_hidden) # b d + return time diff --git a/src/f5_tts/model/trainer.py b/src/f5_tts/model/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..d9ab4a8c76de7ac7c89ea29e43ef2b79f0da1862 --- /dev/null +++ b/src/f5_tts/model/trainer.py @@ -0,0 +1,437 @@ +from __future__ import annotations + +import gc +import math +import os + +import torch +import torchaudio +import wandb +from accelerate import Accelerator +from accelerate.utils import DistributedDataParallelKwargs +from ema_pytorch import EMA +from torch.optim import AdamW +from torch.optim.lr_scheduler import LinearLR, SequentialLR +from torch.utils.data import DataLoader, Dataset, SequentialSampler +from tqdm import tqdm + +from f5_tts.model import CFM +from f5_tts.model.dataset import DynamicBatchSampler, collate_fn +from f5_tts.model.utils import 
default, exists + +# trainer + + +class Trainer: + def __init__( + self, + model: CFM, + epochs, + learning_rate, + num_warmup_updates=20000, + save_per_updates=1000, + keep_last_n_checkpoints: int = -1, # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints + checkpoint_path=None, + batch_size_per_gpu=32, + batch_size_type: str = "sample", + max_samples=32, + grad_accumulation_steps=1, + max_grad_norm=1.0, + noise_scheduler: str | None = None, + duration_predictor: torch.nn.Module | None = None, + logger: str | None = "wandb", # "wandb" | "tensorboard" | None + wandb_project="test_f5-tts", + wandb_run_name="test_run", + wandb_resume_id: str = None, + log_samples: bool = False, + last_per_updates=None, + accelerate_kwargs: dict = dict(), + ema_kwargs: dict = dict(), + bnb_optimizer: bool = False, + mel_spec_type: str = "vocos", # "vocos" | "bigvgan" + is_local_vocoder: bool = False, # use local path vocoder + local_vocoder_path: str = "", # local vocoder path + cfg_dict: dict = dict(), # training config + ): + ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) + + if logger == "wandb" and not wandb.api.api_key: + logger = None + self.log_samples = log_samples + + self.accelerator = Accelerator( + log_with=logger if logger == "wandb" else None, + kwargs_handlers=[ddp_kwargs], + gradient_accumulation_steps=grad_accumulation_steps, + **accelerate_kwargs, + ) + + self.logger = logger + if self.logger == "wandb": + if exists(wandb_resume_id): + init_kwargs = {"wandb": {"resume": "allow", "name": wandb_run_name, "id": wandb_resume_id}} + else: + init_kwargs = {"wandb": {"resume": "allow", "name": wandb_run_name}} + + if not cfg_dict: + cfg_dict = { + "epochs": epochs, + "learning_rate": learning_rate, + "num_warmup_updates": num_warmup_updates, + "batch_size_per_gpu": batch_size_per_gpu, + "batch_size_type": batch_size_type, + "max_samples": max_samples, + "grad_accumulation_steps": grad_accumulation_steps, + "max_grad_norm": max_grad_norm, + "noise_scheduler": noise_scheduler, + } + cfg_dict["gpus"] = self.accelerator.num_processes + self.accelerator.init_trackers( + project_name=wandb_project, + init_kwargs=init_kwargs, + config=cfg_dict, + ) + + elif self.logger == "tensorboard": + from torch.utils.tensorboard import SummaryWriter + + self.writer = SummaryWriter(log_dir=f"runs/{wandb_run_name}") + + self.model = model + + if self.is_main: + self.ema_model = EMA(model, include_online_model=False, **ema_kwargs) + self.ema_model.to(self.accelerator.device) + + print(f"Using logger: {logger}") + if grad_accumulation_steps > 1: + print( + "Gradient accumulation checkpointing with per_updates now, old logic per_steps used with before f992c4e" + ) + + self.epochs = epochs + self.num_warmup_updates = num_warmup_updates + self.save_per_updates = save_per_updates + self.keep_last_n_checkpoints = keep_last_n_checkpoints + self.last_per_updates = default(last_per_updates, save_per_updates) + self.checkpoint_path = default(checkpoint_path, "ckpts/test_f5-tts") + + self.batch_size_per_gpu = batch_size_per_gpu + self.batch_size_type = batch_size_type + self.max_samples = max_samples + self.grad_accumulation_steps = grad_accumulation_steps + self.max_grad_norm = max_grad_norm + + # mel vocoder config + self.vocoder_name = mel_spec_type + self.is_local_vocoder = is_local_vocoder + self.local_vocoder_path = local_vocoder_path + + self.noise_scheduler = noise_scheduler + + self.duration_predictor = duration_predictor + + if bnb_optimizer: + import bitsandbytes as bnb + 
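+ # AdamW8bit keeps the optimizer states (exp_avg, exp_avg_sq) in 8-bit, roughly quartering optimizer-state memory versus 32-bit AdamW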
+ self.optimizer = bnb.optim.AdamW8bit(model.parameters(), lr=learning_rate) + else: + self.optimizer = AdamW(model.parameters(), lr=learning_rate) + self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) + + @property + def is_main(self): + return self.accelerator.is_main_process + + def save_checkpoint(self, update, last=False): + self.accelerator.wait_for_everyone() + if self.is_main: + checkpoint = dict( + model_state_dict=self.accelerator.unwrap_model(self.model).state_dict(), + optimizer_state_dict=self.accelerator.unwrap_model(self.optimizer).state_dict(), + ema_model_state_dict=self.ema_model.state_dict(), + scheduler_state_dict=self.scheduler.state_dict(), + update=update, + ) + if not os.path.exists(self.checkpoint_path): + os.makedirs(self.checkpoint_path) + if last: + self.accelerator.save(checkpoint, f"{self.checkpoint_path}/model_last.pt") + print(f"Saved last checkpoint at update {update}") + else: + if self.keep_last_n_checkpoints == 0: + return + self.accelerator.save(checkpoint, f"{self.checkpoint_path}/model_{update}.pt") + if self.keep_last_n_checkpoints > 0: + # Updated logic to exclude pretrained model from rotation + checkpoints = [ + f + for f in os.listdir(self.checkpoint_path) + if f.startswith("model_") + and not f.startswith("pretrained_") # Exclude pretrained models + and f.endswith(".pt") + and f != "model_last.pt" + ] + checkpoints.sort(key=lambda x: int(x.split("_")[1].split(".")[0])) + while len(checkpoints) > self.keep_last_n_checkpoints: + oldest_checkpoint = checkpoints.pop(0) + os.remove(os.path.join(self.checkpoint_path, oldest_checkpoint)) + print(f"Removed old checkpoint: {oldest_checkpoint}") + + def load_checkpoint(self): + if ( + not exists(self.checkpoint_path) + or not os.path.exists(self.checkpoint_path) + or not any(filename.endswith((".pt", ".safetensors")) for filename in os.listdir(self.checkpoint_path)) + ): + return 0 + + self.accelerator.wait_for_everyone() + if "model_last.pt" in os.listdir(self.checkpoint_path): + latest_checkpoint = "model_last.pt" + else: + # Updated to consider pretrained models for loading but prioritize training checkpoints + all_checkpoints = [ + f + for f in os.listdir(self.checkpoint_path) + if (f.startswith("model_") or f.startswith("pretrained_")) and f.endswith((".pt", ".safetensors")) + ] + + # First try to find regular training checkpoints + training_checkpoints = [f for f in all_checkpoints if f.startswith("model_") and f != "model_last.pt"] + if training_checkpoints: + latest_checkpoint = sorted( + training_checkpoints, + key=lambda x: int("".join(filter(str.isdigit, x))), + )[-1] + else: + # If no training checkpoints, use pretrained model + latest_checkpoint = next(f for f in all_checkpoints if f.startswith("pretrained_")) + + if latest_checkpoint.endswith(".safetensors"): # always a pretrained checkpoint + from safetensors.torch import load_file + + checkpoint = load_file(f"{self.checkpoint_path}/{latest_checkpoint}", device="cpu") + checkpoint = {"ema_model_state_dict": checkpoint} + elif latest_checkpoint.endswith(".pt"): + # checkpoint = torch.load(f"{self.checkpoint_path}/{latest_checkpoint}", map_location=self.accelerator.device) # rather use accelerator.load_state ಥ_ಥ + checkpoint = torch.load( + f"{self.checkpoint_path}/{latest_checkpoint}", weights_only=True, map_location="cpu" + ) + + # patch for backward compatibility, 305e3ea + for key in ["ema_model.mel_spec.mel_stft.mel_scale.fb", "ema_model.mel_spec.mel_stft.spectrogram.window"]: + if key in 
checkpoint["ema_model_state_dict"]: + del checkpoint["ema_model_state_dict"][key] + + if self.is_main: + self.ema_model.load_state_dict(checkpoint["ema_model_state_dict"]) + + if "update" in checkpoint or "step" in checkpoint: + # patch for backward compatibility, with before f992c4e + if "step" in checkpoint: + checkpoint["update"] = checkpoint["step"] // self.grad_accumulation_steps + if self.grad_accumulation_steps > 1 and self.is_main: + print( + "F5-TTS WARNING: Loading checkpoint saved with per_steps logic (before f992c4e), will convert to per_updates according to grad_accumulation_steps setting, may have unexpected behaviour." + ) + # patch for backward compatibility, 305e3ea + for key in ["mel_spec.mel_stft.mel_scale.fb", "mel_spec.mel_stft.spectrogram.window"]: + if key in checkpoint["model_state_dict"]: + del checkpoint["model_state_dict"][key] + + self.accelerator.unwrap_model(self.model).load_state_dict(checkpoint["model_state_dict"]) + self.accelerator.unwrap_model(self.optimizer).load_state_dict(checkpoint["optimizer_state_dict"]) + if self.scheduler: + self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"]) + update = checkpoint["update"] + else: + checkpoint["model_state_dict"] = { + k.replace("ema_model.", ""): v + for k, v in checkpoint["ema_model_state_dict"].items() + if k not in ["initted", "update", "step"] + } + self.accelerator.unwrap_model(self.model).load_state_dict(checkpoint["model_state_dict"]) + update = 0 + + del checkpoint + gc.collect() + return update + + def train(self, train_dataset: Dataset, num_workers=16, resumable_with_seed: int = None): + if self.log_samples: + from f5_tts.infer.utils_infer import cfg_strength, load_vocoder, nfe_step, sway_sampling_coef + + vocoder = load_vocoder( + vocoder_name=self.vocoder_name, is_local=self.is_local_vocoder, local_path=self.local_vocoder_path + ) + target_sample_rate = self.accelerator.unwrap_model(self.model).mel_spec.target_sample_rate + log_samples_path = f"{self.checkpoint_path}/samples" + os.makedirs(log_samples_path, exist_ok=True) + + if exists(resumable_with_seed): + generator = torch.Generator() + generator.manual_seed(resumable_with_seed) + else: + generator = None + + if self.batch_size_type == "sample": + train_dataloader = DataLoader( + train_dataset, + collate_fn=collate_fn, + num_workers=num_workers, + pin_memory=True, + persistent_workers=True, + batch_size=self.batch_size_per_gpu, + shuffle=True, + generator=generator, + ) + elif self.batch_size_type == "frame": + self.accelerator.even_batches = False + sampler = SequentialSampler(train_dataset) + batch_sampler = DynamicBatchSampler( + sampler, + self.batch_size_per_gpu, + max_samples=self.max_samples, + random_seed=resumable_with_seed, # This enables reproducible shuffling + drop_residual=False, + ) + train_dataloader = DataLoader( + train_dataset, + collate_fn=collate_fn, + num_workers=num_workers, + pin_memory=True, + persistent_workers=True, + batch_sampler=batch_sampler, + ) + else: + raise ValueError(f"batch_size_type must be either 'sample' or 'frame', but received {self.batch_size_type}") + + # accelerator.prepare() dispatches batches to devices; + # which means the length of dataloader calculated before, should consider the number of devices + warmup_updates = ( + self.num_warmup_updates * self.accelerator.num_processes + ) # consider a fixed warmup steps while using accelerate multi-gpu ddp + # otherwise by default with split_batches=False, warmup steps change with num_processes + total_updates = 
math.ceil(len(train_dataloader) / self.grad_accumulation_steps) * self.epochs + decay_updates = total_updates - warmup_updates + warmup_scheduler = LinearLR(self.optimizer, start_factor=1e-8, end_factor=1.0, total_iters=warmup_updates) + decay_scheduler = LinearLR(self.optimizer, start_factor=1.0, end_factor=1e-8, total_iters=decay_updates) + self.scheduler = SequentialLR( + self.optimizer, schedulers=[warmup_scheduler, decay_scheduler], milestones=[warmup_updates] + ) + train_dataloader, self.scheduler = self.accelerator.prepare( + train_dataloader, self.scheduler + ) # actual multi_gpu updates = single_gpu updates / gpu nums + start_update = self.load_checkpoint() + global_update = start_update + + if exists(resumable_with_seed): + orig_epoch_step = len(train_dataloader) + start_step = start_update * self.grad_accumulation_steps + skipped_epoch = int(start_step // orig_epoch_step) + skipped_batch = start_step % orig_epoch_step + skipped_dataloader = self.accelerator.skip_first_batches(train_dataloader, num_batches=skipped_batch) + else: + skipped_epoch = 0 + + for epoch in range(skipped_epoch, self.epochs): + self.model.train() + if exists(resumable_with_seed) and epoch == skipped_epoch: + progress_bar_initial = math.ceil(skipped_batch / self.grad_accumulation_steps) + current_dataloader = skipped_dataloader + else: + progress_bar_initial = 0 + current_dataloader = train_dataloader + + # Set epoch for the batch sampler if it exists + if hasattr(train_dataloader, "batch_sampler") and hasattr(train_dataloader.batch_sampler, "set_epoch"): + train_dataloader.batch_sampler.set_epoch(epoch) + + progress_bar = tqdm( + range(math.ceil(len(train_dataloader) / self.grad_accumulation_steps)), + desc=f"Epoch {epoch+1}/{self.epochs}", + unit="update", + disable=not self.accelerator.is_local_main_process, + initial=progress_bar_initial, + ) + + for batch in current_dataloader: + with self.accelerator.accumulate(self.model): + text_inputs = batch["text"] + mel_spec = batch["mel"].permute(0, 2, 1) + mel_lengths = batch["mel_lengths"] + + # TODO. 
add duration predictor training + if self.duration_predictor is not None and self.accelerator.is_local_main_process: + dur_loss = self.duration_predictor(mel_spec, lens=batch.get("durations")) + self.accelerator.log({"duration loss": dur_loss.item()}, step=global_update) + + loss, cond, pred = self.model( + mel_spec, text=text_inputs, lens=mel_lengths, noise_scheduler=self.noise_scheduler + ) + self.accelerator.backward(loss) + + if self.max_grad_norm > 0 and self.accelerator.sync_gradients: + self.accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm) + + self.optimizer.step() + self.scheduler.step() + self.optimizer.zero_grad() + + if self.accelerator.sync_gradients: + if self.is_main: + self.ema_model.update() + + global_update += 1 + progress_bar.update(1) + progress_bar.set_postfix(update=str(global_update), loss=loss.item()) + + if self.accelerator.is_local_main_process: + self.accelerator.log( + {"loss": loss.item(), "lr": self.scheduler.get_last_lr()[0]}, step=global_update + ) + if self.logger == "tensorboard": + self.writer.add_scalar("loss", loss.item(), global_update) + self.writer.add_scalar("lr", self.scheduler.get_last_lr()[0], global_update) + + if global_update % self.save_per_updates == 0 and self.accelerator.sync_gradients: + self.save_checkpoint(global_update) + + if self.log_samples and self.accelerator.is_local_main_process: + ref_audio_len = mel_lengths[0] + infer_text = [ + text_inputs[0] + ([" "] if isinstance(text_inputs[0], list) else " ") + text_inputs[0] + ] + with torch.inference_mode(): + generated, _ = self.accelerator.unwrap_model(self.model).sample( + cond=mel_spec[0][:ref_audio_len].unsqueeze(0), + text=infer_text, + duration=ref_audio_len * 2, + steps=nfe_step, + cfg_strength=cfg_strength, + sway_sampling_coef=sway_sampling_coef, + ) + generated = generated.to(torch.float32) + gen_mel_spec = generated[:, ref_audio_len:, :].permute(0, 2, 1).to(self.accelerator.device) + ref_mel_spec = batch["mel"][0].unsqueeze(0) + if self.vocoder_name == "vocos": + gen_audio = vocoder.decode(gen_mel_spec).cpu() + ref_audio = vocoder.decode(ref_mel_spec).cpu() + elif self.vocoder_name == "bigvgan": + gen_audio = vocoder(gen_mel_spec).squeeze(0).cpu() + ref_audio = vocoder(ref_mel_spec).squeeze(0).cpu() + + torchaudio.save( + f"{log_samples_path}/update_{global_update}_gen.wav", gen_audio, target_sample_rate + ) + torchaudio.save( + f"{log_samples_path}/update_{global_update}_ref.wav", ref_audio, target_sample_rate + ) + + if global_update % self.last_per_updates == 0 and self.accelerator.sync_gradients: + self.save_checkpoint(global_update, last=True) + + self.save_checkpoint(global_update, last=True) + + self.accelerator.end_training() diff --git a/src/f5_tts/model/utils.py b/src/f5_tts/model/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e8811274dc25245703e78048314a98b69afdcedd --- /dev/null +++ b/src/f5_tts/model/utils.py @@ -0,0 +1,192 @@ +from __future__ import annotations + +import os +import random +from collections import defaultdict +from importlib.resources import files + +import torch +from torch.nn.utils.rnn import pad_sequence + +import jieba +from pypinyin import lazy_pinyin, Style + + +# seed everything + + +def seed_everything(seed=0): + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +# helpers + + +def exists(v): + 
return v is not None + + +def default(v, d): + return v if exists(v) else d + + +# tensor helpers + + +def lens_to_mask(t: int["b"], length: int | None = None) -> bool["b n"]: # noqa: F722 F821 + if not exists(length): + length = t.amax() + + seq = torch.arange(length, device=t.device) + return seq[None, :] < t[:, None] + + +def mask_from_start_end_indices(seq_len: int["b"], start: int["b"], end: int["b"]): # noqa: F722 F821 + max_seq_len = seq_len.max().item() + seq = torch.arange(max_seq_len, device=start.device).long() + start_mask = seq[None, :] >= start[:, None] + end_mask = seq[None, :] < end[:, None] + return start_mask & end_mask + + +def mask_from_frac_lengths(seq_len: int["b"], frac_lengths: float["b"]): # noqa: F722 F821 + lengths = (frac_lengths * seq_len).long() + max_start = seq_len - lengths + + rand = torch.rand_like(frac_lengths) + start = (max_start * rand).long().clamp(min=0) + end = start + lengths + + return mask_from_start_end_indices(seq_len, start, end) + + +def maybe_masked_mean(t: float["b n d"], mask: bool["b n"] = None) -> float["b d"]: # noqa: F722 + if not exists(mask): + return t.mean(dim=1) + + t = torch.where(mask[:, :, None], t, torch.tensor(0.0, device=t.device)) + num = t.sum(dim=1) + den = mask.float().sum(dim=1) + + return num / den.clamp(min=1.0) + + +# simple utf-8 tokenizer, since paper went character based +def list_str_to_tensor(text: list[str], padding_value=-1) -> int["b nt"]: # noqa: F722 + list_tensors = [torch.tensor([*bytes(t, "UTF-8")]) for t in text] # ByT5 style + text = pad_sequence(list_tensors, padding_value=padding_value, batch_first=True) + return text + + +# char tokenizer, based on custom dataset's extracted .txt file +def list_str_to_idx( + text: list[str] | list[list[str]], + vocab_char_map: dict[str, int], # {char: idx} + padding_value=-1, +) -> int["b nt"]: # noqa: F722 + list_idx_tensors = [torch.tensor([vocab_char_map.get(c, 0) for c in t]) for t in text] # pinyin or char style + text = pad_sequence(list_idx_tensors, padding_value=padding_value, batch_first=True) + return text + + +# Get tokenizer + + +def get_tokenizer(dataset_name, tokenizer: str = "pinyin"): + """ + tokenizer - "pinyin" do g2p for only chinese characters, need .txt vocab_file + - "char" for char-wise tokenizer, need .txt vocab_file + - "byte" for utf-8 tokenizer + - "custom" if you're directly passing in a path to the vocab.txt you want to use + vocab_size - if use "pinyin", all available pinyin types, common alphabets (also those with accent) and symbols + - if use "char", derived from unfiltered character & symbol counts of custom dataset + - if use "byte", set to 256 (unicode byte range) + """ + if tokenizer in ["pinyin", "char"]: + tokenizer_path = os.path.join(files("f5_tts").joinpath("../../data"), f"{dataset_name}/vocab.txt") + with open(tokenizer_path, "r", encoding="utf-8") as f: + vocab_char_map = {} + for i, char in enumerate(f): + vocab_char_map[char[:-1]] = i + vocab_size = len(vocab_char_map) + assert vocab_char_map[" "] == 0, "make sure space is of idx 0 in vocab.txt, cuz 0 is used for unknown char" + + elif tokenizer == "byte": + vocab_char_map = None + vocab_size = 256 + + elif tokenizer == "custom": + with open(dataset_name, "r", encoding="utf-8") as f: + vocab_char_map = {} + for i, char in enumerate(f): + vocab_char_map[char[:-1]] = i + vocab_size = len(vocab_char_map) + + return vocab_char_map, vocab_size + + +# convert char to pinyin + + +def convert_char_to_pinyin(text_list, polyphone=True): + if jieba.dt.initialized is False: + 
jieba.default_logger.setLevel(50) # CRITICAL + jieba.initialize() + + final_text_list = [] + custom_trans = str.maketrans( + {";": ",", "“": '"', "”": '"', "‘": "'", "’": "'"} + ) # add custom trans here, to address oov + + def is_chinese(c): + return ( + "\u3100" <= c <= "\u9fff" # common chinese characters + ) + + for text in text_list: + char_list = [] + text = text.translate(custom_trans) + for seg in jieba.cut(text): + seg_byte_len = len(bytes(seg, "UTF-8")) + if seg_byte_len == len(seg): # if pure alphabets and symbols + if char_list and seg_byte_len > 1 and char_list[-1] not in " :'\"": + char_list.append(" ") + char_list.extend(seg) + elif polyphone and seg_byte_len == 3 * len(seg): # if pure east asian characters + seg_ = lazy_pinyin(seg, style=Style.TONE3, tone_sandhi=True) + for i, c in enumerate(seg): + if is_chinese(c): + char_list.append(" ") + char_list.append(seg_[i]) + else: # if mixed characters, alphabets and symbols + for c in seg: + if ord(c) < 256: + char_list.extend(c) + elif is_chinese(c): + char_list.append(" ") + char_list.extend(lazy_pinyin(c, style=Style.TONE3, tone_sandhi=True)) + else: + char_list.append(c) + final_text_list.append(char_list) + + return final_text_list + + +# filter func for dirty data with many repetitions + + +def repetition_found(text, length=2, tolerance=10): + pattern_count = defaultdict(int) + for i in range(len(text) - length + 1): + pattern = text[i : i + length] + pattern_count[pattern] += 1 + for pattern, count in pattern_count.items(): + if count > tolerance: + return True + return False diff --git a/src/f5_tts/scripts/count_max_epoch.py b/src/f5_tts/scripts/count_max_epoch.py new file mode 100644 index 0000000000000000000000000000000000000000..fe291e52f636aea1b61eabca6d3279a33e664c94 --- /dev/null +++ b/src/f5_tts/scripts/count_max_epoch.py @@ -0,0 +1,33 @@ +"""ADAPTIVE BATCH SIZE""" + +print("Adaptive batch size: using grouping batch sampler, frames_per_gpu fixed fed in") +print(" -> least padding, gather wavs with accumulated frames in a batch\n") + +# data +total_hours = 95282 +mel_hop_length = 256 +mel_sampling_rate = 24000 + +# target +wanted_max_updates = 1200000 + +# train params +gpus = 8 +frames_per_gpu = 38400 # 8 * 38400 = 307200 +grad_accum = 1 + +# intermediate +mini_batch_frames = frames_per_gpu * grad_accum * gpus +mini_batch_hours = mini_batch_frames * mel_hop_length / mel_sampling_rate / 3600 +updates_per_epoch = total_hours / mini_batch_hours +# steps_per_epoch = updates_per_epoch * grad_accum + +# result +epochs = wanted_max_updates / updates_per_epoch +print(f"epochs should be set to: {epochs:.0f} ({epochs/grad_accum:.1f} x gd_acum {grad_accum})") +print(f"progress_bar should show approx. 0/{updates_per_epoch:.0f} updates") +# print(f" or approx. 
0/{steps_per_epoch:.0f} steps") + +# others +print(f"total {total_hours:.0f} hours") +print(f"mini-batch of {mini_batch_frames:.0f} frames, {mini_batch_hours:.2f} hours per mini-batch") diff --git a/src/f5_tts/scripts/count_params_gflops.py b/src/f5_tts/scripts/count_params_gflops.py new file mode 100644 index 0000000000000000000000000000000000000000..05d7ced0176260081f79c1e57abdebd79c362315 --- /dev/null +++ b/src/f5_tts/scripts/count_params_gflops.py @@ -0,0 +1,39 @@ +import sys +import os + +sys.path.append(os.getcwd()) + +from f5_tts.model import CFM, DiT + +import torch +import thop + + +""" ~155M """ +# transformer = UNetT(dim = 768, depth = 20, heads = 12, ff_mult = 4) +# transformer = UNetT(dim = 768, depth = 20, heads = 12, ff_mult = 4, text_dim = 512, conv_layers = 4) +# transformer = DiT(dim = 768, depth = 18, heads = 12, ff_mult = 2) +# transformer = DiT(dim = 768, depth = 18, heads = 12, ff_mult = 2, text_dim = 512, conv_layers = 4) +# transformer = DiT(dim = 768, depth = 18, heads = 12, ff_mult = 2, text_dim = 512, conv_layers = 4, long_skip_connection = True) +# transformer = MMDiT(dim = 512, depth = 16, heads = 16, ff_mult = 2) + +""" ~335M """ +# FLOPs: 622.1 G, Params: 333.2 M +# transformer = UNetT(dim = 1024, depth = 24, heads = 16, ff_mult = 4) +# FLOPs: 363.4 G, Params: 335.8 M +transformer = DiT(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4) + + +model = CFM(transformer=transformer) +target_sample_rate = 24000 +n_mel_channels = 100 +hop_length = 256 +duration = 20 +frame_length = int(duration * target_sample_rate / hop_length) +text_length = 150 + +flops, params = thop.profile( + model, inputs=(torch.randn(1, frame_length, n_mel_channels), torch.zeros(1, text_length, dtype=torch.long)) +) +print(f"FLOPs: {flops / 1e9} G") +print(f"Params: {params / 1e6} M") diff --git a/src/f5_tts/socket_client.py b/src/f5_tts/socket_client.py new file mode 100644 index 0000000000000000000000000000000000000000..4cad5e7178eec1758b7d999c64842ee99e410971 --- /dev/null +++ b/src/f5_tts/socket_client.py @@ -0,0 +1,61 @@ +import socket +import asyncio +import pyaudio +import numpy as np +import logging +import time + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +async def listen_to_F5TTS(text, server_ip="localhost", server_port=9998): + client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + await asyncio.get_event_loop().run_in_executor(None, client_socket.connect, (server_ip, int(server_port))) + + start_time = time.time() + first_chunk_time = None + + async def play_audio_stream(): + nonlocal first_chunk_time + p = pyaudio.PyAudio() + stream = p.open(format=pyaudio.paFloat32, channels=1, rate=24000, output=True, frames_per_buffer=2048) + + try: + while True: + data = await asyncio.get_event_loop().run_in_executor(None, client_socket.recv, 8192) + if not data: + break + if data == b"END": + logger.info("End of audio received.") + break + + audio_array = np.frombuffer(data, dtype=np.float32) + stream.write(audio_array.tobytes()) + + if first_chunk_time is None: + first_chunk_time = time.time() + + finally: + stream.stop_stream() + stream.close() + p.terminate() + + logger.info(f"Total time taken: {time.time() - start_time:.4f} seconds") + + try: + data_to_send = f"{text}".encode("utf-8") + await asyncio.get_event_loop().run_in_executor(None, client_socket.sendall, data_to_send) + await play_audio_stream() + + except Exception as e: + logger.error(f"Error in listen_to_F5TTS: {e}") + + finally: + client_socket.close() 
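+ # Usage note: run the matching server first (e.g. `python src/f5_tts/socket_server.py`, default port 9998);
+ # it streams raw float32 samples followed by a b"END" marker, which is what play_audio_stream() above expects.
+ # The hard-coded 24000 Hz playback rate assumes the default model config's target sample rate.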
+ + +if __name__ == "__main__": + text_to_send = "As a Reader assistant, I'm familiar with new technology. which are key to its improved performance in terms of both training speed and inference efficiency. Let's break down the components" + + asyncio.run(listen_to_F5TTS(text_to_send)) diff --git a/src/f5_tts/socket_server.py b/src/f5_tts/socket_server.py new file mode 100644 index 0000000000000000000000000000000000000000..344b1d7ab3a3bcaf5a04080252becc39d2bd6fb9 --- /dev/null +++ b/src/f5_tts/socket_server.py @@ -0,0 +1,267 @@ +import argparse +import gc +import logging +import numpy as np +import queue +import socket +import struct +import threading +import traceback +import wave +from importlib.resources import files + +import torch +import torchaudio +from huggingface_hub import hf_hub_download +from omegaconf import OmegaConf + +from f5_tts.model.backbones.dit import DiT # noqa: F401. used for config +from f5_tts.infer.utils_infer import ( + chunk_text, + preprocess_ref_audio_text, + load_vocoder, + load_model, + infer_batch_process, +) + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class AudioFileWriterThread(threading.Thread): + """Threaded file writer to avoid blocking the TTS streaming process.""" + + def __init__(self, output_file, sampling_rate): + super().__init__() + self.output_file = output_file + self.sampling_rate = sampling_rate + self.queue = queue.Queue() + self.stop_event = threading.Event() + self.audio_data = [] + + def run(self): + """Process queued audio data and write it to a file.""" + logger.info("AudioFileWriterThread started.") + with wave.open(self.output_file, "wb") as wf: + wf.setnchannels(1) + wf.setsampwidth(2) + wf.setframerate(self.sampling_rate) + + while not self.stop_event.is_set() or not self.queue.empty(): + try: + chunk = self.queue.get(timeout=0.1) + if chunk is not None: + chunk = np.int16(chunk * 32767) + self.audio_data.append(chunk) + wf.writeframes(chunk.tobytes()) + except queue.Empty: + continue + + def add_chunk(self, chunk): + """Add a new chunk to the queue.""" + self.queue.put(chunk) + + def stop(self): + """Stop writing and ensure all queued data is written.""" + self.stop_event.set() + self.join() + logger.info("Audio writing completed.") + + +class TTSStreamingProcessor: + def __init__(self, model, ckpt_file, vocab_file, ref_audio, ref_text, device=None, dtype=torch.float32): + self.device = device or ( + "cuda" + if torch.cuda.is_available() + else "xpu" + if torch.xpu.is_available() + else "mps" + if torch.backends.mps.is_available() + else "cpu" + ) + model_cfg = OmegaConf.load(str(files("f5_tts").joinpath(f"configs/{model}.yaml"))) + self.model_cls = globals()[model_cfg.model.backbone] + self.model_arc = model_cfg.model.arch + self.mel_spec_type = model_cfg.model.mel_spec.mel_spec_type + self.sampling_rate = model_cfg.model.mel_spec.target_sample_rate + + self.model = self.load_ema_model(ckpt_file, vocab_file, dtype) + self.vocoder = self.load_vocoder_model() + + self.update_reference(ref_audio, ref_text) + self._warm_up() + self.file_writer_thread = None + self.first_package = True + + def load_ema_model(self, ckpt_file, vocab_file, dtype): + return load_model( + self.model_cls, + self.model_arc, + ckpt_path=ckpt_file, + mel_spec_type=self.mel_spec_type, + vocab_file=vocab_file, + ode_method="euler", + use_ema=True, + device=self.device, + ).to(self.device, dtype=dtype) + + def load_vocoder_model(self): + return load_vocoder(vocoder_name=self.mel_spec_type, is_local=False, local_path=None, 
device=self.device) + + def update_reference(self, ref_audio, ref_text): + self.ref_audio, self.ref_text = preprocess_ref_audio_text(ref_audio, ref_text) + self.audio, self.sr = torchaudio.load(self.ref_audio) + + ref_audio_duration = self.audio.shape[-1] / self.sr + ref_text_byte_len = len(self.ref_text.encode("utf-8")) + self.max_chars = int(ref_text_byte_len / (ref_audio_duration) * (25 - ref_audio_duration)) + self.few_chars = int(ref_text_byte_len / (ref_audio_duration) * (25 - ref_audio_duration) / 2) + self.min_chars = int(ref_text_byte_len / (ref_audio_duration) * (25 - ref_audio_duration) / 4) + + def _warm_up(self): + logger.info("Warming up the model...") + gen_text = "Warm-up text for the model." + for _ in infer_batch_process( + (self.audio, self.sr), + self.ref_text, + [gen_text], + self.model, + self.vocoder, + progress=None, + device=self.device, + streaming=True, + ): + pass + logger.info("Warm-up completed.") + + def generate_stream(self, text, conn): + text_batches = chunk_text(text, max_chars=self.max_chars) + if self.first_package: + text_batches = chunk_text(text_batches[0], max_chars=self.few_chars) + text_batches[1:] + text_batches = chunk_text(text_batches[0], max_chars=self.min_chars) + text_batches[1:] + self.first_package = False + + audio_stream = infer_batch_process( + (self.audio, self.sr), + self.ref_text, + text_batches, + self.model, + self.vocoder, + progress=None, + device=self.device, + streaming=True, + chunk_size=2048, + ) + + # Reset the file writer thread + if self.file_writer_thread is not None: + self.file_writer_thread.stop() + self.file_writer_thread = AudioFileWriterThread("output.wav", self.sampling_rate) + self.file_writer_thread.start() + + for audio_chunk, _ in audio_stream: + if len(audio_chunk) > 0: + logger.info(f"Generated audio chunk of size: {len(audio_chunk)}") + + # Send audio chunk via socket + conn.sendall(struct.pack(f"{len(audio_chunk)}f", *audio_chunk)) + + # Write to file asynchronously + self.file_writer_thread.add_chunk(audio_chunk) + + logger.info("Finished sending audio stream.") + conn.sendall(b"END") # Send end signal + + # Ensure all audio data is written before exiting + self.file_writer_thread.stop() + + +def handle_client(conn, processor): + try: + with conn: + conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + while True: + data = conn.recv(1024) + if not data: + processor.first_package = True + break + data_str = data.decode("utf-8").strip() + logger.info(f"Received text: {data_str}") + + try: + processor.generate_stream(data_str, conn) + except Exception as inner_e: + logger.error(f"Error during processing: {inner_e}") + traceback.print_exc() + break + except Exception as e: + logger.error(f"Error handling client: {e}") + traceback.print_exc() + + +def start_server(host, port, processor): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind((host, port)) + s.listen() + logger.info(f"Server started on {host}:{port}") + while True: + conn, addr = s.accept() + logger.info(f"Connected by {addr}") + handle_client(conn, processor) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument("--host", default="0.0.0.0") + parser.add_argument("--port", default=9998) + + parser.add_argument( + "--model", + default="F5TTS_v1_Base", + help="The model name, e.g. 
F5TTS_v1_Base", + ) + parser.add_argument( + "--ckpt_file", + default=str(hf_hub_download(repo_id="SWivid/F5-TTS", filename="F5TTS_v1_Base/model_1250000.safetensors")), + help="Path to the model checkpoint file", + ) + parser.add_argument( + "--vocab_file", + default="", + help="Path to the vocab file if customized", + ) + + parser.add_argument( + "--ref_audio", + default=str(files("f5_tts").joinpath("infer/examples/basic/basic_ref_en.wav")), + help="Reference audio to provide model with speaker characteristics", + ) + parser.add_argument( + "--ref_text", + default="", + help="Reference audio subtitle, leave empty to auto-transcribe", + ) + + parser.add_argument("--device", default=None, help="Device to run the model on") + parser.add_argument("--dtype", default=torch.float32, help="Data type to use for model inference") + + args = parser.parse_args() + + try: + # Initialize the processor with the model and vocoder + processor = TTSStreamingProcessor( + model=args.model, + ckpt_file=args.ckpt_file, + vocab_file=args.vocab_file, + ref_audio=args.ref_audio, + ref_text=args.ref_text, + device=args.device, + dtype=args.dtype, + ) + + # Start the server + start_server(args.host, args.port, processor) + + except KeyboardInterrupt: + gc.collect() diff --git a/src/f5_tts/train/README.md b/src/f5_tts/train/README.md new file mode 100644 index 0000000000000000000000000000000000000000..25d2380b1388dbd4344fcc76ed3caef843e3f917 --- /dev/null +++ b/src/f5_tts/train/README.md @@ -0,0 +1,82 @@ +# Training + +## Prepare Dataset + +Example data processing scripts, and you may tailor your own one along with a Dataset class in `src/f5_tts/model/dataset.py`. + +### 1. Some specific Datasets preparing scripts +Download corresponding dataset first, and fill in the path in scripts. + +```bash +# Prepare the Emilia dataset +python src/f5_tts/train/datasets/prepare_emilia.py + +# Prepare the Wenetspeech4TTS dataset +python src/f5_tts/train/datasets/prepare_wenetspeech4tts.py + +# Prepare the LibriTTS dataset +python src/f5_tts/train/datasets/prepare_libritts.py + +# Prepare the LJSpeech dataset +python src/f5_tts/train/datasets/prepare_ljspeech.py +``` + +### 2. Create custom dataset with metadata.csv +Use guidance see [#57 here](https://github.com/SWivid/F5-TTS/discussions/57#discussioncomment-10959029). + +```bash +python src/f5_tts/train/datasets/prepare_csv_wavs.py +``` + +## Training & Finetuning + +Once your datasets are prepared, you can start the training process. + +### 1. Training script used for pretrained model + +```bash +# setup accelerate config, e.g. use multi-gpu ddp, fp16 +# will be to: ~/.cache/huggingface/accelerate/default_config.yaml +accelerate config + +# .yaml files are under src/f5_tts/configs directory +accelerate launch src/f5_tts/train/train.py --config-name F5TTS_v1_Base.yaml + +# possible to overwrite accelerate and hydra config +accelerate launch --mixed_precision=fp16 src/f5_tts/train/train.py --config-name F5TTS_v1_Base.yaml ++datasets.batch_size_per_gpu=19200 +``` + +### 2. Finetuning practice +Discussion board for Finetuning [#57](https://github.com/SWivid/F5-TTS/discussions/57). + +Gradio UI training/finetuning with `src/f5_tts/train/finetune_gradio.py` see [#143](https://github.com/SWivid/F5-TTS/discussions/143). + +The `use_ema = True` is harmful for early-stage finetuned checkpoints (which goes just few updates, thus ema weights still dominated by pretrained ones), try turn it off and see if provide better results. + +### 3. 
W&B Logging + +The `wandb/` dir will be created under path you run training/finetuning scripts. + +By default, the training script does NOT use logging (assuming you didn't manually log in using `wandb login`). + +To turn on wandb logging, you can either: + +1. Manually login with `wandb login`: Learn more [here](https://docs.wandb.ai/ref/cli/wandb-login) +2. Automatically login programmatically by setting an environment variable: Get an API KEY at https://wandb.ai/authorize and set the environment variable as follows: + +On Mac & Linux: + +``` +export WANDB_API_KEY= +``` + +On Windows: + +``` +set WANDB_API_KEY= +``` +Moreover, if you couldn't access W&B and want to log metrics offline, you can set the environment variable as follows: + +``` +export WANDB_MODE=offline +``` diff --git a/src/f5_tts/train/__pycache__/finetune_gradio.cpython-310.pyc b/src/f5_tts/train/__pycache__/finetune_gradio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99749fa9f4e5c0d6410e72267479385a281a7b30 Binary files /dev/null and b/src/f5_tts/train/__pycache__/finetune_gradio.cpython-310.pyc differ diff --git a/src/f5_tts/train/datasets/prepare_csv_wavs.py b/src/f5_tts/train/datasets/prepare_csv_wavs.py new file mode 100644 index 0000000000000000000000000000000000000000..14794d6268e959b9246861609574760a1f2f9166 --- /dev/null +++ b/src/f5_tts/train/datasets/prepare_csv_wavs.py @@ -0,0 +1,284 @@ +import os +import sys +import signal +import subprocess # For invoking ffprobe +import shutil +import concurrent.futures +import multiprocessing +from contextlib import contextmanager + +sys.path.append(os.getcwd()) + +import argparse +import csv +import json +from importlib.resources import files +from pathlib import Path + +import torchaudio +from tqdm import tqdm +from datasets.arrow_writer import ArrowWriter + +from f5_tts.model.utils import ( + convert_char_to_pinyin, +) + + +PRETRAINED_VOCAB_PATH = files("f5_tts").joinpath("../../data/your_training_dataset/vocab.txt") + + +def is_csv_wavs_format(input_dataset_dir): + fpath = Path(input_dataset_dir) + metadata = fpath / "metadata.csv" + wavs = fpath / "wavs" + return metadata.exists() and metadata.is_file() and wavs.exists() and wavs.is_dir() + + +# Configuration constants +BATCH_SIZE = 100 # Batch size for text conversion +MAX_WORKERS = max(1, multiprocessing.cpu_count() - 1) # Leave one CPU free +THREAD_NAME_PREFIX = "AudioProcessor" +CHUNK_SIZE = 100 # Number of files to process per worker batch + +executor = None # Global executor for cleanup + + +@contextmanager +def graceful_exit(): + """Context manager for graceful shutdown on signals""" + + def signal_handler(signum, frame): + print("\nReceived signal to terminate. 
Cleaning up...") + if executor is not None: + print("Shutting down executor...") + executor.shutdown(wait=False, cancel_futures=True) + sys.exit(1) + + # Set up signal handlers + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + try: + yield + finally: + if executor is not None: + executor.shutdown(wait=False) + + +def process_audio_file(audio_path, text, polyphone): + """Process a single audio file by checking its existence and extracting duration.""" + if not Path(audio_path).exists(): + print(f"audio {audio_path} not found, skipping") + return None + try: + audio_duration = get_audio_duration(audio_path) + if audio_duration <= 0: + raise ValueError(f"Duration {audio_duration} is non-positive.") + return (audio_path, text, audio_duration) + except Exception as e: + print(f"Warning: Failed to process {audio_path} due to error: {e}. Skipping corrupt file.") + return None + + +def batch_convert_texts(texts, polyphone, batch_size=BATCH_SIZE): + """Convert a list of texts to pinyin in batches.""" + converted_texts = [] + for i in range(0, len(texts), batch_size): + batch = texts[i : i + batch_size] + converted_batch = convert_char_to_pinyin(batch, polyphone=polyphone) + converted_texts.extend(converted_batch) + return converted_texts + + +def prepare_csv_wavs_dir(input_dir, num_workers=None): + global executor + assert is_csv_wavs_format(input_dir), f"not csv_wavs format: {input_dir}" + input_dir = Path(input_dir) + metadata_path = input_dir / "metadata.csv" + audio_path_text_pairs = read_audio_text_pairs(metadata_path.as_posix()) + + polyphone = True + total_files = len(audio_path_text_pairs) + + # Use provided worker count or calculate optimal number + worker_count = num_workers if num_workers is not None else min(MAX_WORKERS, total_files) + print(f"\nProcessing {total_files} audio files using {worker_count} workers...") + + with graceful_exit(): + # Initialize thread pool with optimized settings + with concurrent.futures.ThreadPoolExecutor( + max_workers=worker_count, thread_name_prefix=THREAD_NAME_PREFIX + ) as exec: + executor = exec + results = [] + + # Process files in chunks for better efficiency + for i in range(0, len(audio_path_text_pairs), CHUNK_SIZE): + chunk = audio_path_text_pairs[i : i + CHUNK_SIZE] + # Submit futures in order + chunk_futures = [executor.submit(process_audio_file, pair[0], pair[1], polyphone) for pair in chunk] + + # Iterate over futures in the original submission order to preserve ordering + for future in tqdm( + chunk_futures, + total=len(chunk), + desc=f"Processing chunk {i//CHUNK_SIZE + 1}/{(total_files + CHUNK_SIZE - 1)//CHUNK_SIZE}", + ): + try: + result = future.result() + if result is not None: + results.append(result) + except Exception as e: + print(f"Error processing file: {e}") + + executor = None + + # Filter out failed results + processed = [res for res in results if res is not None] + if not processed: + raise RuntimeError("No valid audio files were processed!") + + # Batch process text conversion + raw_texts = [item[1] for item in processed] + converted_texts = batch_convert_texts(raw_texts, polyphone, batch_size=BATCH_SIZE) + + # Prepare final results + sub_result = [] + durations = [] + vocab_set = set() + + for (audio_path, _, duration), conv_text in zip(processed, converted_texts): + sub_result.append({"audio_path": audio_path, "text": conv_text, "duration": duration}) + durations.append(duration) + vocab_set.update(list(conv_text)) + + return sub_result, durations, vocab_set + + +def 
get_audio_duration(audio_path, timeout=5): + """ + Get the duration of an audio file in seconds using ffmpeg's ffprobe. + Falls back to torchaudio.load() if ffprobe fails. + """ + try: + cmd = [ + "ffprobe", + "-v", + "error", + "-show_entries", + "format=duration", + "-of", + "default=noprint_wrappers=1:nokey=1", + audio_path, + ] + result = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True, timeout=timeout + ) + duration_str = result.stdout.strip() + if duration_str: + return float(duration_str) + raise ValueError("Empty duration string from ffprobe.") + except (subprocess.TimeoutExpired, subprocess.SubprocessError, ValueError) as e: + print(f"Warning: ffprobe failed for {audio_path} with error: {e}. Falling back to torchaudio.") + try: + audio, sample_rate = torchaudio.load(audio_path) + return audio.shape[1] / sample_rate + except Exception as e: + raise RuntimeError(f"Both ffprobe and torchaudio failed for {audio_path}: {e}") + + +def read_audio_text_pairs(csv_file_path): + audio_text_pairs = [] + + parent = Path(csv_file_path).parent + with open(csv_file_path, mode="r", newline="", encoding="utf-8-sig") as csvfile: + reader = csv.reader(csvfile, delimiter="|") + next(reader) # Skip the header row + for row in reader: + if len(row) >= 2: + audio_file = row[0].strip() # First column: audio file path + text = row[1].strip() # Second column: text + audio_file_path = parent / audio_file + audio_text_pairs.append((audio_file_path.as_posix(), text)) + + return audio_text_pairs + + +def save_prepped_dataset(out_dir, result, duration_list, text_vocab_set, is_finetune): + out_dir = Path(out_dir) + out_dir.mkdir(exist_ok=True, parents=True) + print(f"\nSaving to {out_dir} ...") + + # Save dataset with improved batch size for better I/O performance + raw_arrow_path = out_dir / "raw.arrow" + with ArrowWriter(path=raw_arrow_path.as_posix(), writer_batch_size=100) as writer: + for line in tqdm(result, desc="Writing to raw.arrow ..."): + writer.write(line) + + # Save durations to JSON + dur_json_path = out_dir / "duration.json" + with open(dur_json_path.as_posix(), "w", encoding="utf-8") as f: + json.dump({"duration": duration_list}, f, ensure_ascii=False) + + # Handle vocab file - write only once based on finetune flag + voca_out_path = out_dir / "vocab.txt" + if is_finetune: + file_vocab_finetune = PRETRAINED_VOCAB_PATH.as_posix() + # shutil.copy2(file_vocab_finetune, voca_out_path) # Không cần copy lại vocab, do đã thực hiện ở bước chuẩn bị dữ liệu + else: + with open(voca_out_path.as_posix(), "w") as f: + for vocab in sorted(text_vocab_set): + f.write(vocab + "\n") + + dataset_name = out_dir.stem + print(f"\nFor {dataset_name}, sample count: {len(result)}") + print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}") + print(f"For {dataset_name}, total {sum(duration_list)/3600:.2f} hours") + + +def prepare_and_save_set(inp_dir, out_dir, is_finetune: bool = True, num_workers: int = None): + if is_finetune: + assert PRETRAINED_VOCAB_PATH.exists(), f"pretrained vocab.txt not found: {PRETRAINED_VOCAB_PATH}" + sub_result, durations, vocab_set = prepare_csv_wavs_dir(inp_dir, num_workers=num_workers) + save_prepped_dataset(out_dir, sub_result, durations, vocab_set, is_finetune) + + +def cli(): + try: + # Before processing, check if ffprobe is available. + if shutil.which("ffprobe") is None: + print( + "Warning: ffprobe is not available. Duration extraction will rely on torchaudio (which may be slower)." 
+ ) + + # Usage examples in help text + parser = argparse.ArgumentParser( + description="Prepare and save dataset.", + epilog=""" +Examples: + # For fine-tuning (default): + python prepare_csv_wavs.py /input/dataset/path /output/dataset/path + + # For pre-training: + python prepare_csv_wavs.py /input/dataset/path /output/dataset/path --pretrain + + # With custom worker count: + python prepare_csv_wavs.py /input/dataset/path /output/dataset/path --workers 4 + """, + ) + parser.add_argument("inp_dir", type=str, help="Input directory containing the data.") + parser.add_argument("out_dir", type=str, help="Output directory to save the prepared data.") + parser.add_argument("--pretrain", action="store_true", help="Enable for new pretrain, otherwise is a fine-tune") + parser.add_argument("--workers", type=int, help=f"Number of worker threads (default: {MAX_WORKERS})") + args = parser.parse_args() + + prepare_and_save_set(args.inp_dir, args.out_dir, is_finetune=not args.pretrain, num_workers=args.workers) + except KeyboardInterrupt: + print("\nOperation cancelled by user. Cleaning up...") + if executor is not None: + executor.shutdown(wait=False, cancel_futures=True) + sys.exit(1) + + +if __name__ == "__main__": + cli() diff --git a/src/f5_tts/train/datasets/prepare_emilia.py b/src/f5_tts/train/datasets/prepare_emilia.py new file mode 100644 index 0000000000000000000000000000000000000000..d9b276afa68d671cee69f45cc16d2b12cd0859a4 --- /dev/null +++ b/src/f5_tts/train/datasets/prepare_emilia.py @@ -0,0 +1,230 @@ +# Emilia Dataset: https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07 +# if use updated new version, i.e. WebDataset, feel free to modify / draft your own script + +# generate audio text map for Emilia ZH & EN +# evaluate for vocab size + +import os +import sys + +sys.path.append(os.getcwd()) + +import json +from concurrent.futures import ProcessPoolExecutor +from importlib.resources import files +from pathlib import Path +from tqdm import tqdm + +from datasets.arrow_writer import ArrowWriter + +from f5_tts.model.utils import ( + repetition_found, + convert_char_to_pinyin, +) + + +out_zh = { + "ZH_B00041_S06226", + "ZH_B00042_S09204", + "ZH_B00065_S09430", + "ZH_B00065_S09431", + "ZH_B00066_S09327", + "ZH_B00066_S09328", +} +zh_filters = ["い", "て"] +# seems synthesized audios, or heavily code-switched +out_en = { + "EN_B00013_S00913", + "EN_B00042_S00120", + "EN_B00055_S04111", + "EN_B00061_S00693", + "EN_B00061_S01494", + "EN_B00061_S03375", + "EN_B00059_S00092", + "EN_B00111_S04300", + "EN_B00100_S03759", + "EN_B00087_S03811", + "EN_B00059_S00950", + "EN_B00089_S00946", + "EN_B00078_S05127", + "EN_B00070_S04089", + "EN_B00074_S09659", + "EN_B00061_S06983", + "EN_B00061_S07060", + "EN_B00059_S08397", + "EN_B00082_S06192", + "EN_B00091_S01238", + "EN_B00089_S07349", + "EN_B00070_S04343", + "EN_B00061_S02400", + "EN_B00076_S01262", + "EN_B00068_S06467", + "EN_B00076_S02943", + "EN_B00064_S05954", + "EN_B00061_S05386", + "EN_B00066_S06544", + "EN_B00076_S06944", + "EN_B00072_S08620", + "EN_B00076_S07135", + "EN_B00076_S09127", + "EN_B00065_S00497", + "EN_B00059_S06227", + "EN_B00063_S02859", + "EN_B00075_S01547", + "EN_B00061_S08286", + "EN_B00079_S02901", + "EN_B00092_S03643", + "EN_B00096_S08653", + "EN_B00063_S04297", + "EN_B00063_S04614", + "EN_B00079_S04698", + "EN_B00104_S01666", + "EN_B00061_S09504", + "EN_B00061_S09694", + "EN_B00065_S05444", + "EN_B00063_S06860", + "EN_B00065_S05725", + "EN_B00069_S07628", + "EN_B00083_S03875", + "EN_B00071_S07665", + 
"EN_B00071_S07665", + "EN_B00062_S04187", + "EN_B00065_S09873", + "EN_B00065_S09922", + "EN_B00084_S02463", + "EN_B00067_S05066", + "EN_B00106_S08060", + "EN_B00073_S06399", + "EN_B00073_S09236", + "EN_B00087_S00432", + "EN_B00085_S05618", + "EN_B00064_S01262", + "EN_B00072_S01739", + "EN_B00059_S03913", + "EN_B00069_S04036", + "EN_B00067_S05623", + "EN_B00060_S05389", + "EN_B00060_S07290", + "EN_B00062_S08995", +} +en_filters = ["ا", "い", "て"] + + +def deal_with_audio_dir(audio_dir): + audio_jsonl = audio_dir.with_suffix(".jsonl") + sub_result, durations = [], [] + vocab_set = set() + bad_case_zh = 0 + bad_case_en = 0 + with open(audio_jsonl, "r") as f: + lines = f.readlines() + for line in tqdm(lines, desc=f"{audio_jsonl.stem}"): + obj = json.loads(line) + text = obj["text"] + if obj["language"] == "zh": + if obj["wav"].split("/")[1] in out_zh or any(f in text for f in zh_filters) or repetition_found(text): + bad_case_zh += 1 + continue + else: + text = text.translate( + str.maketrans({",": ",", "!": "!", "?": "?"}) + ) # not "。" cuz much code-switched + if obj["language"] == "en": + if ( + obj["wav"].split("/")[1] in out_en + or any(f in text for f in en_filters) + or repetition_found(text, length=4) + ): + bad_case_en += 1 + continue + if tokenizer == "pinyin": + text = convert_char_to_pinyin([text], polyphone=polyphone)[0] + duration = obj["duration"] + sub_result.append({"audio_path": str(audio_dir.parent / obj["wav"]), "text": text, "duration": duration}) + durations.append(duration) + vocab_set.update(list(text)) + return sub_result, durations, vocab_set, bad_case_zh, bad_case_en + + +def main(): + assert tokenizer in ["pinyin", "char"] + result = [] + duration_list = [] + text_vocab_set = set() + total_bad_case_zh = 0 + total_bad_case_en = 0 + + # process raw data + executor = ProcessPoolExecutor(max_workers=max_workers) + futures = [] + for lang in langs: + dataset_path = Path(os.path.join(dataset_dir, lang)) + [ + futures.append(executor.submit(deal_with_audio_dir, audio_dir)) + for audio_dir in dataset_path.iterdir() + if audio_dir.is_dir() + ] + for futures in tqdm(futures, total=len(futures)): + sub_result, durations, vocab_set, bad_case_zh, bad_case_en = futures.result() + result.extend(sub_result) + duration_list.extend(durations) + text_vocab_set.update(vocab_set) + total_bad_case_zh += bad_case_zh + total_bad_case_en += bad_case_en + executor.shutdown() + + # save preprocessed dataset to disk + if not os.path.exists(f"{save_dir}"): + os.makedirs(f"{save_dir}") + print(f"\nSaving to {save_dir} ...") + + # dataset = Dataset.from_dict({"audio_path": audio_path_list, "text": text_list, "duration": duration_list}) # oom + # dataset.save_to_disk(f"{save_dir}/raw", max_shard_size="2GB") + with ArrowWriter(path=f"{save_dir}/raw.arrow") as writer: + for line in tqdm(result, desc="Writing to raw.arrow ..."): + writer.write(line) + + # dup a json separately saving duration in case for DynamicBatchSampler ease + with open(f"{save_dir}/duration.json", "w", encoding="utf-8") as f: + json.dump({"duration": duration_list}, f, ensure_ascii=False) + + # vocab map, i.e. tokenizer + # add alphabets and symbols (optional, if plan to ft on de/fr etc.) 
+ # if tokenizer == "pinyin": + # text_vocab_set.update([chr(i) for i in range(32, 127)] + [chr(i) for i in range(192, 256)]) + with open(f"{save_dir}/vocab.txt", "w") as f: + for vocab in sorted(text_vocab_set): + f.write(vocab + "\n") + + print(f"\nFor {dataset_name}, sample count: {len(result)}") + print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}") + print(f"For {dataset_name}, total {sum(duration_list)/3600:.2f} hours") + if "ZH" in langs: + print(f"Bad zh transcription case: {total_bad_case_zh}") + if "EN" in langs: + print(f"Bad en transcription case: {total_bad_case_en}\n") + + +if __name__ == "__main__": + max_workers = 32 + + tokenizer = "pinyin" # "pinyin" | "char" + polyphone = True + + langs = ["ZH", "EN"] + dataset_dir = "/Emilia_Dataset/raw" + dataset_name = f"Emilia_{'_'.join(langs)}_{tokenizer}" + save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}" + print(f"\nPrepare for {dataset_name}, will save to {save_dir}\n") + + main() + + # Emilia ZH & EN + # samples count 37837916 (after removal) + # pinyin vocab size 2543 (polyphone) + # total duration 95281.87 (hours) + # bad zh asr cnt 230435 (samples) + # bad eh asr cnt 37217 (samples) + + # vocab size may be slightly different due to jieba tokenizer and pypinyin (e.g. way of polyphoneme) + # please be careful if using pretrained model, make sure the vocab.txt is same diff --git a/src/f5_tts/train/datasets/prepare_libritts.py b/src/f5_tts/train/datasets/prepare_libritts.py new file mode 100644 index 0000000000000000000000000000000000000000..2a35dd97980154500be715b41a41d6acae15361f --- /dev/null +++ b/src/f5_tts/train/datasets/prepare_libritts.py @@ -0,0 +1,92 @@ +import os +import sys + +sys.path.append(os.getcwd()) + +import json +from concurrent.futures import ProcessPoolExecutor +from importlib.resources import files +from pathlib import Path +from tqdm import tqdm +import soundfile as sf +from datasets.arrow_writer import ArrowWriter + + +def deal_with_audio_dir(audio_dir): + sub_result, durations = [], [] + vocab_set = set() + audio_lists = list(audio_dir.rglob("*.wav")) + + for line in audio_lists: + text_path = line.with_suffix(".normalized.txt") + text = open(text_path, "r").read().strip() + duration = sf.info(line).duration + if duration < 0.4 or duration > 30: + continue + sub_result.append({"audio_path": str(line), "text": text, "duration": duration}) + durations.append(duration) + vocab_set.update(list(text)) + return sub_result, durations, vocab_set + + +def main(): + result = [] + duration_list = [] + text_vocab_set = set() + + # process raw data + executor = ProcessPoolExecutor(max_workers=max_workers) + futures = [] + + for subset in tqdm(SUB_SET): + dataset_path = Path(os.path.join(dataset_dir, subset)) + [ + futures.append(executor.submit(deal_with_audio_dir, audio_dir)) + for audio_dir in dataset_path.iterdir() + if audio_dir.is_dir() + ] + for future in tqdm(futures, total=len(futures)): + sub_result, durations, vocab_set = future.result() + result.extend(sub_result) + duration_list.extend(durations) + text_vocab_set.update(vocab_set) + executor.shutdown() + + # save preprocessed dataset to disk + if not os.path.exists(f"{save_dir}"): + os.makedirs(f"{save_dir}") + print(f"\nSaving to {save_dir} ...") + + with ArrowWriter(path=f"{save_dir}/raw.arrow") as writer: + for line in tqdm(result, desc="Writing to raw.arrow ..."): + writer.write(line) + + # dup a json separately saving duration in case for DynamicBatchSampler ease + with open(f"{save_dir}/duration.json", "w", 
encoding="utf-8") as f: + json.dump({"duration": duration_list}, f, ensure_ascii=False) + + # vocab map, i.e. tokenizer + with open(f"{save_dir}/vocab.txt", "w") as f: + for vocab in sorted(text_vocab_set): + f.write(vocab + "\n") + + print(f"\nFor {dataset_name}, sample count: {len(result)}") + print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}") + print(f"For {dataset_name}, total {sum(duration_list)/3600:.2f} hours") + + +if __name__ == "__main__": + max_workers = 36 + + tokenizer = "char" # "pinyin" | "char" + + SUB_SET = ["train-clean-100", "train-clean-360", "train-other-500"] + dataset_dir = "/LibriTTS" + dataset_name = f"LibriTTS_{'_'.join(SUB_SET)}_{tokenizer}".replace("train-clean-", "").replace("train-other-", "") + save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}" + print(f"\nPrepare for {dataset_name}, will save to {save_dir}\n") + main() + + # For LibriTTS_100_360_500_char, sample count: 354218 + # For LibriTTS_100_360_500_char, vocab size is: 78 + # For LibriTTS_100_360_500_char, total 554.09 hours diff --git a/src/f5_tts/train/datasets/prepare_ljspeech.py b/src/f5_tts/train/datasets/prepare_ljspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..19a5b2a90e562570da9a0bcb65f19590acdee941 --- /dev/null +++ b/src/f5_tts/train/datasets/prepare_ljspeech.py @@ -0,0 +1,65 @@ +import os +import sys + +sys.path.append(os.getcwd()) + +import json +from importlib.resources import files +from pathlib import Path +from tqdm import tqdm +import soundfile as sf +from datasets.arrow_writer import ArrowWriter + + +def main(): + result = [] + duration_list = [] + text_vocab_set = set() + + with open(meta_info, "r") as f: + lines = f.readlines() + for line in tqdm(lines): + uttr, text, norm_text = line.split("|") + norm_text = norm_text.strip() + wav_path = Path(dataset_dir) / "wavs" / f"{uttr}.wav" + duration = sf.info(wav_path).duration + if duration < 0.4 or duration > 30: + continue + result.append({"audio_path": str(wav_path), "text": norm_text, "duration": duration}) + duration_list.append(duration) + text_vocab_set.update(list(norm_text)) + + # save preprocessed dataset to disk + if not os.path.exists(f"{save_dir}"): + os.makedirs(f"{save_dir}") + print(f"\nSaving to {save_dir} ...") + + with ArrowWriter(path=f"{save_dir}/raw.arrow") as writer: + for line in tqdm(result, desc="Writing to raw.arrow ..."): + writer.write(line) + + # dup a json separately saving duration in case for DynamicBatchSampler ease + with open(f"{save_dir}/duration.json", "w", encoding="utf-8") as f: + json.dump({"duration": duration_list}, f, ensure_ascii=False) + + # vocab map, i.e. tokenizer + # add alphabets and symbols (optional, if plan to ft on de/fr etc.) 
+ with open(f"{save_dir}/vocab.txt", "w") as f: + for vocab in sorted(text_vocab_set): + f.write(vocab + "\n") + + print(f"\nFor {dataset_name}, sample count: {len(result)}") + print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}") + print(f"For {dataset_name}, total {sum(duration_list)/3600:.2f} hours") + + +if __name__ == "__main__": + tokenizer = "char" # "pinyin" | "char" + + dataset_dir = "/LJSpeech-1.1" + dataset_name = f"LJSpeech_{tokenizer}" + meta_info = os.path.join(dataset_dir, "metadata.csv") + save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}" + print(f"\nPrepare for {dataset_name}, will save to {save_dir}\n") + + main() diff --git a/src/f5_tts/train/datasets/prepare_wenetspeech4tts.py b/src/f5_tts/train/datasets/prepare_wenetspeech4tts.py new file mode 100644 index 0000000000000000000000000000000000000000..bbcdc4818c9fd87e99a37708251e6d83a7013480 --- /dev/null +++ b/src/f5_tts/train/datasets/prepare_wenetspeech4tts.py @@ -0,0 +1,125 @@ +# generate audio text map for WenetSpeech4TTS +# evaluate for vocab size + +import os +import sys + +sys.path.append(os.getcwd()) + +import json +from concurrent.futures import ProcessPoolExecutor +from importlib.resources import files +from tqdm import tqdm + +import torchaudio +from datasets import Dataset + +from f5_tts.model.utils import convert_char_to_pinyin + + +def deal_with_sub_path_files(dataset_path, sub_path): + print(f"Dealing with: {sub_path}") + + text_dir = os.path.join(dataset_path, sub_path, "txts") + audio_dir = os.path.join(dataset_path, sub_path, "wavs") + text_files = os.listdir(text_dir) + + audio_paths, texts, durations = [], [], [] + for text_file in tqdm(text_files): + with open(os.path.join(text_dir, text_file), "r", encoding="utf-8") as file: + first_line = file.readline().split("\t") + audio_nm = first_line[0] + audio_path = os.path.join(audio_dir, audio_nm + ".wav") + text = first_line[1].strip() + + audio_paths.append(audio_path) + + if tokenizer == "pinyin": + texts.extend(convert_char_to_pinyin([text], polyphone=polyphone)) + elif tokenizer == "char": + texts.append(text) + + audio, sample_rate = torchaudio.load(audio_path) + durations.append(audio.shape[-1] / sample_rate) + + return audio_paths, texts, durations + + +def main(): + assert tokenizer in ["pinyin", "char"] + + audio_path_list, text_list, duration_list = [], [], [] + + executor = ProcessPoolExecutor(max_workers=max_workers) + futures = [] + for dataset_path in dataset_paths: + sub_items = os.listdir(dataset_path) + sub_paths = [item for item in sub_items if os.path.isdir(os.path.join(dataset_path, item))] + for sub_path in sub_paths: + futures.append(executor.submit(deal_with_sub_path_files, dataset_path, sub_path)) + for future in tqdm(futures, total=len(futures)): + audio_paths, texts, durations = future.result() + audio_path_list.extend(audio_paths) + text_list.extend(texts) + duration_list.extend(durations) + executor.shutdown() + + if not os.path.exists("data"): + os.makedirs("data") + + print(f"\nSaving to {save_dir} ...") + dataset = Dataset.from_dict({"audio_path": audio_path_list, "text": text_list, "duration": duration_list}) + dataset.save_to_disk(f"{save_dir}/raw", max_shard_size="2GB") # arrow format + + with open(f"{save_dir}/duration.json", "w", encoding="utf-8") as f: + json.dump( + {"duration": duration_list}, f, ensure_ascii=False + ) # dup a json separately saving duration in case for DynamicBatchSampler ease + + print("\nEvaluating vocab size (all characters and symbols / all phonemes) ...") 
+ text_vocab_set = set() + for text in tqdm(text_list): + text_vocab_set.update(list(text)) + + # add alphabets and symbols (optional, if plan to ft on de/fr etc.) + if tokenizer == "pinyin": + text_vocab_set.update([chr(i) for i in range(32, 127)] + [chr(i) for i in range(192, 256)]) + + with open(f"{save_dir}/vocab.txt", "w") as f: + for vocab in sorted(text_vocab_set): + f.write(vocab + "\n") + print(f"\nFor {dataset_name}, sample count: {len(text_list)}") + print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}\n") + + +if __name__ == "__main__": + max_workers = 32 + + tokenizer = "pinyin" # "pinyin" | "char" + polyphone = True + dataset_choice = 1 # 1: Premium, 2: Standard, 3: Basic + + dataset_name = ( + ["WenetSpeech4TTS_Premium", "WenetSpeech4TTS_Standard", "WenetSpeech4TTS_Basic"][dataset_choice - 1] + + "_" + + tokenizer + ) + dataset_paths = [ + "/WenetSpeech4TTS/Basic", + "/WenetSpeech4TTS/Standard", + "/WenetSpeech4TTS/Premium", + ][-dataset_choice:] + save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}" + print(f"\nChoose Dataset: {dataset_name}, will save to {save_dir}\n") + + main() + + # Results (if adding alphabets with accents and symbols): + # WenetSpeech4TTS Basic Standard Premium + # samples count 3932473 1941220 407494 + # pinyin vocab size 1349 1348 1344 (no polyphone) + # - - 1459 (polyphone) + # char vocab size 5264 5219 5042 + + # vocab size may be slightly different due to jieba tokenizer and pypinyin (e.g. way of polyphoneme) + # please be careful if using pretrained model, make sure the vocab.txt is same diff --git a/src/f5_tts/train/finetune_cli.py b/src/f5_tts/train/finetune_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..3785179d418814b9349ae03b3335d42eb39765a0 --- /dev/null +++ b/src/f5_tts/train/finetune_cli.py @@ -0,0 +1,215 @@ +import argparse +import os +import shutil +from importlib.resources import files + +from cached_path import cached_path + +from f5_tts.model import CFM, UNetT, DiT, Trainer +from f5_tts.model.utils import get_tokenizer +from f5_tts.model.dataset import load_dataset + + +# -------------------------- Dataset Settings --------------------------- # +target_sample_rate = 24000 +n_mel_channels = 100 +hop_length = 256 +win_length = 1024 +n_fft = 1024 +mel_spec_type = "vocos" # 'vocos' or 'bigvgan' + + +# -------------------------- Argument Parsing --------------------------- # +def parse_args(): + parser = argparse.ArgumentParser(description="Train CFM Model") + + parser.add_argument( + "--exp_name", + type=str, + default="F5TTS_v1_Base", + choices=["F5TTS_v1_Base", "F5TTS_Base", "E2TTS_Base"], + help="Experiment name", + ) + parser.add_argument("--dataset_name", type=str, default="Emilia_ZH_EN", help="Name of the dataset to use") + parser.add_argument("--learning_rate", type=float, default=1e-5, help="Learning rate for training") + parser.add_argument("--batch_size_per_gpu", type=int, default=3200, help="Batch size per GPU") + parser.add_argument( + "--batch_size_type", type=str, default="frame", choices=["frame", "sample"], help="Batch size type" + ) + parser.add_argument("--max_samples", type=int, default=64, help="Max sequences per batch") + parser.add_argument("--grad_accumulation_steps", type=int, default=1, help="Gradient accumulation steps") + parser.add_argument("--max_grad_norm", type=float, default=1.0, help="Max gradient norm for clipping") + parser.add_argument("--epochs", type=int, default=1000, help="Number of training epochs") + 
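+    # Example invocation (a sketch only; the dataset name and hyperparameter values are illustrative,
+    # roughly matching how finetune_gradio.py assembles the launch command):
+    #   accelerate launch src/f5_tts/train/finetune_cli.py --exp_name F5TTS_v1_Base \
+    #       --dataset_name your_training_dataset --finetune --tokenizer char \
+    #       --learning_rate 1e-5 --batch_size_per_gpu 3200 --batch_size_type frame --epochs 100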
parser.add_argument("--num_warmup_updates", type=int, default=300, help="Warmup updates") + parser.add_argument("--save_per_updates", type=int, default=10000, help="Save checkpoint every X updates") + parser.add_argument( + "--keep_last_n_checkpoints", + type=int, + default=-1, + help="-1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints", + ) + parser.add_argument("--last_per_updates", type=int, default=50000, help="Save last checkpoint every X updates") + parser.add_argument("--finetune", action="store_true", help="Use Finetune") + parser.add_argument("--pretrain", type=str, default=None, help="the path to the checkpoint") + parser.add_argument( + "--tokenizer", type=str, default="char", choices=["pinyin", "char", "custom"], help="Tokenizer type" + ) + parser.add_argument( + "--tokenizer_path", + type=str, + default=None, + help="Path to custom tokenizer vocab file (only used if tokenizer = 'custom')", + ) + parser.add_argument( + "--log_samples", + action="store_true", + help="Log inferenced samples per ckpt save updates", + ) + parser.add_argument("--logger", type=str, default=None, choices=["wandb", "tensorboard"], help="logger") + parser.add_argument( + "--bnb_optimizer", + action="store_true", + help="Use 8-bit Adam optimizer from bitsandbytes", + ) + + return parser.parse_args() + + +# -------------------------- Training Settings -------------------------- # + + +def main(): + args = parse_args() + + checkpoint_path = str(files("f5_tts").joinpath(f"../../ckpts/{args.dataset_name}")) + + # Model parameters based on experiment name + + if args.exp_name == "F5TTS_v1_Base": + wandb_resume_id = None + model_cls = DiT + model_cfg = dict( + dim=1024, + depth=22, + heads=16, + ff_mult=2, + text_dim=512, + conv_layers=4, + ) + if args.finetune: + if args.pretrain is None: + ckpt_path = str(cached_path("hf://SWivid/F5-TTS/F5TTS_v1_Base/model_1250000.safetensors")) + else: + ckpt_path = args.pretrain + + elif args.exp_name == "F5TTS_Base": + wandb_resume_id = None + model_cls = DiT + model_cfg = dict( + dim=1024, + depth=22, + heads=16, + ff_mult=2, + text_dim=512, + text_mask_padding=False, + conv_layers=4, + pe_attn_head=1, + ) + if args.finetune: + if args.pretrain is None: + ckpt_path = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.pt")) + else: + ckpt_path = args.pretrain + + elif args.exp_name == "E2TTS_Base": + wandb_resume_id = None + model_cls = UNetT + model_cfg = dict( + dim=1024, + depth=24, + heads=16, + ff_mult=4, + text_mask_padding=False, + pe_attn_head=1, + ) + if args.finetune: + if args.pretrain is None: + ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.pt")) + else: + ckpt_path = args.pretrain + + if args.finetune: + if not os.path.isdir(checkpoint_path): + os.makedirs(checkpoint_path, exist_ok=True) + + file_checkpoint = os.path.basename(ckpt_path) + if not file_checkpoint.startswith("pretrained_"): # Change: Add 'pretrained_' prefix to copied model + file_checkpoint = "pretrained_" + file_checkpoint + file_checkpoint = os.path.join(checkpoint_path, file_checkpoint) + if not os.path.isfile(file_checkpoint): + shutil.copy2(ckpt_path, file_checkpoint) + print("copy checkpoint for finetune") + print("Pretrained checkpoint được sử dụng: " + file_checkpoint) + + # Use the tokenizer and tokenizer_path provided in the command line arguments + + tokenizer = args.tokenizer + if tokenizer == "custom": + if not args.tokenizer_path: + raise ValueError("Custom tokenizer selected, but no tokenizer_path provided.") + 
tokenizer_path = args.tokenizer_path + else: + tokenizer_path = args.dataset_name + + vocab_char_map, vocab_size = get_tokenizer(tokenizer_path, tokenizer) + + print("vocab : ", vocab_size) + print("vocoder : ", mel_spec_type) + + mel_spec_kwargs = dict( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + n_mel_channels=n_mel_channels, + target_sample_rate=target_sample_rate, + mel_spec_type=mel_spec_type, + ) + + model = CFM( + transformer=model_cls(**model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels), + mel_spec_kwargs=mel_spec_kwargs, + vocab_char_map=vocab_char_map, + ) + + trainer = Trainer( + model, + args.epochs, + args.learning_rate, + num_warmup_updates=args.num_warmup_updates, + save_per_updates=args.save_per_updates, + keep_last_n_checkpoints=args.keep_last_n_checkpoints, + checkpoint_path=checkpoint_path, + batch_size_per_gpu=args.batch_size_per_gpu, + batch_size_type=args.batch_size_type, + max_samples=args.max_samples, + grad_accumulation_steps=args.grad_accumulation_steps, + max_grad_norm=args.max_grad_norm, + logger=args.logger, + wandb_project=args.dataset_name, + wandb_run_name=args.exp_name, + wandb_resume_id=wandb_resume_id, + log_samples=args.log_samples, + last_per_updates=args.last_per_updates, + bnb_optimizer=args.bnb_optimizer, + ) + + train_dataset = load_dataset(args.dataset_name, tokenizer, mel_spec_kwargs=mel_spec_kwargs) + + trainer.train( + train_dataset, + resumable_with_seed=666, # seed for shuffling dataset + ) + + +if __name__ == "__main__": + main() diff --git a/src/f5_tts/train/finetune_gradio.py b/src/f5_tts/train/finetune_gradio.py new file mode 100644 index 0000000000000000000000000000000000000000..578c93104b930a1de97cfdd77b1193c66eceaccd --- /dev/null +++ b/src/f5_tts/train/finetune_gradio.py @@ -0,0 +1,1869 @@ +import gc +import json +import numpy as np +import os +import platform +import psutil +import queue +import random +import re +import signal +import shutil +import subprocess +import sys +import tempfile +import threading +import time +from glob import glob +from importlib.resources import files +from scipy.io import wavfile + +import click +import gradio as gr +import librosa +import torch +import torchaudio +from cached_path import cached_path +from datasets import Dataset as Dataset_ +from datasets.arrow_writer import ArrowWriter +from safetensors.torch import load_file, save_file + +from f5_tts.api import F5TTS +from f5_tts.model.utils import convert_char_to_pinyin +from f5_tts.infer.utils_infer import transcribe + + +training_process = None +system = platform.system() +python_executable = sys.executable or "python" +tts_api = None +last_checkpoint = "" +last_device = "" +last_ema = None + + +path_data = str(files("f5_tts").joinpath("../../data")) +path_project_ckpts = str(files("f5_tts").joinpath("../../ckpts")) +file_train = str(files("f5_tts").joinpath("train/finetune_cli.py")) + +device = ( + "cuda" + if torch.cuda.is_available() + else "xpu" + if torch.xpu.is_available() + else "mps" + if torch.backends.mps.is_available() + else "cpu" +) + + +# Save settings from a JSON file +def save_settings( + project_name, + exp_name, + learning_rate, + batch_size_per_gpu, + batch_size_type, + max_samples, + grad_accumulation_steps, + max_grad_norm, + epochs, + num_warmup_updates, + save_per_updates, + keep_last_n_checkpoints, + last_per_updates, + finetune, + file_checkpoint_train, + tokenizer_type, + tokenizer_file, + mixed_precision, + logger, + ch_8bit_adam, +): + path_project = os.path.join(path_project_ckpts, 
project_name) + os.makedirs(path_project, exist_ok=True) + file_setting = os.path.join(path_project, "setting.json") + + settings = { + "exp_name": exp_name, + "learning_rate": learning_rate, + "batch_size_per_gpu": batch_size_per_gpu, + "batch_size_type": batch_size_type, + "max_samples": max_samples, + "grad_accumulation_steps": grad_accumulation_steps, + "max_grad_norm": max_grad_norm, + "epochs": epochs, + "num_warmup_updates": num_warmup_updates, + "save_per_updates": save_per_updates, + "keep_last_n_checkpoints": keep_last_n_checkpoints, + "last_per_updates": last_per_updates, + "finetune": finetune, + "file_checkpoint_train": file_checkpoint_train, + "tokenizer_type": tokenizer_type, + "tokenizer_file": tokenizer_file, + "mixed_precision": mixed_precision, + "logger": logger, + "bnb_optimizer": ch_8bit_adam, + } + with open(file_setting, "w") as f: + json.dump(settings, f, indent=4) + return "Settings saved!" + + +# Load settings from a JSON file +def load_settings(project_name): + project_name = project_name.replace("_pinyin", "").replace("_char", "") + path_project = os.path.join(path_project_ckpts, project_name) + file_setting = os.path.join(path_project, "setting.json") + + # Default settings + default_settings = { + "exp_name": "F5TTS_v1_Base", + "learning_rate": 1e-5, + "batch_size_per_gpu": 1, + "batch_size_type": "sample", + "max_samples": 64, + "grad_accumulation_steps": 4, + "max_grad_norm": 1, + "epochs": 100, + "num_warmup_updates": 100, + "save_per_updates": 500, + "keep_last_n_checkpoints": -1, + "last_per_updates": 100, + "finetune": True, + "file_checkpoint_train": "", + "tokenizer_type": "pinyin", + "tokenizer_file": "", + "mixed_precision": "none", + "logger": "wandb", + "bnb_optimizer": False, + } + + # Load settings from file if it exists + if os.path.isfile(file_setting): + with open(file_setting, "r") as f: + file_settings = json.load(f) + default_settings.update(file_settings) + + # Return as a tuple in the correct order + return ( + default_settings["exp_name"], + default_settings["learning_rate"], + default_settings["batch_size_per_gpu"], + default_settings["batch_size_type"], + default_settings["max_samples"], + default_settings["grad_accumulation_steps"], + default_settings["max_grad_norm"], + default_settings["epochs"], + default_settings["num_warmup_updates"], + default_settings["save_per_updates"], + default_settings["keep_last_n_checkpoints"], + default_settings["last_per_updates"], + default_settings["finetune"], + default_settings["file_checkpoint_train"], + default_settings["tokenizer_type"], + default_settings["tokenizer_file"], + default_settings["mixed_precision"], + default_settings["logger"], + default_settings["bnb_optimizer"], + ) + + +# Load metadata +def get_audio_duration(audio_path): + """Calculate the duration mono of an audio file.""" + audio, sample_rate = torchaudio.load(audio_path) + return audio.shape[1] / sample_rate + + +def clear_text(text): + """Clean and prepare text by lowering the case and stripping whitespace.""" + return text.lower().strip() + + +def get_rms( + y, + frame_length=2048, + hop_length=512, + pad_mode="constant", +): # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py + padding = (int(frame_length // 2), int(frame_length // 2)) + y = np.pad(y, padding, mode=pad_mode) + + axis = -1 + # put our new within-frame axis at the end for now + out_strides = y.strides + tuple([y.strides[axis]]) + # Reduce the shape on the framing axis + x_shape_trimmed = list(y.shape) + x_shape_trimmed[axis] -= 
frame_length - 1 + out_shape = tuple(x_shape_trimmed) + tuple([frame_length]) + xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides) + if axis < 0: + target_axis = axis - 1 + else: + target_axis = axis + 1 + xw = np.moveaxis(xw, -1, target_axis) + # Downsample along the target axis + slices = [slice(None)] * xw.ndim + slices[axis] = slice(0, None, hop_length) + x = xw[tuple(slices)] + + # Calculate power + power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True) + + return np.sqrt(power) + + +class Slicer: # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py + def __init__( + self, + sr: int, + threshold: float = -40.0, + min_length: int = 2000, + min_interval: int = 300, + hop_size: int = 20, + max_sil_kept: int = 2000, + ): + if not min_length >= min_interval >= hop_size: + raise ValueError("The following condition must be satisfied: min_length >= min_interval >= hop_size") + if not max_sil_kept >= hop_size: + raise ValueError("The following condition must be satisfied: max_sil_kept >= hop_size") + min_interval = sr * min_interval / 1000 + self.threshold = 10 ** (threshold / 20.0) + self.hop_size = round(sr * hop_size / 1000) + self.win_size = min(round(min_interval), 4 * self.hop_size) + self.min_length = round(sr * min_length / 1000 / self.hop_size) + self.min_interval = round(min_interval / self.hop_size) + self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) + + def _apply_slice(self, waveform, begin, end): + if len(waveform.shape) > 1: + return waveform[:, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)] + else: + return waveform[begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)] + + # @timeit + def slice(self, waveform): + if len(waveform.shape) > 1: + samples = waveform.mean(axis=0) + else: + samples = waveform + if samples.shape[0] <= self.min_length: + return [waveform] + rms_list = get_rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0) + sil_tags = [] + silence_start = None + clip_start = 0 + for i, rms in enumerate(rms_list): + # Keep looping while frame is silent. + if rms < self.threshold: + # Record start of silent frames. + if silence_start is None: + silence_start = i + continue + # Keep looping while frame is not silent and silence start has not been recorded. + if silence_start is None: + continue + # Clear recorded silence start if interval is not enough or clip is too short + is_leading_silence = silence_start == 0 and i > self.max_sil_kept + need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length + if not is_leading_silence and not need_slice_middle: + silence_start = None + continue + # Need slicing. Record the range of silent frames to be removed. 
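+                # Descriptive note on the three cases below (approximate summary of the logic):
+                # 1) silent run <= max_sil_kept: keep the silence and only mark a split point at the
+                #    quietest frame (leading silence is instead cut away up to that point);
+                # 2) run <= 2 * max_sil_kept: cut around the quietest frames, keeping at most
+                #    max_sil_kept silent frames next to each neighbouring clip;
+                # 3) longer runs: remove the middle of the silence, keeping at most max_sil_kept
+                #    frames of silence on either side.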
+ if i - silence_start <= self.max_sil_kept: + pos = rms_list[silence_start : i + 1].argmin() + silence_start + if silence_start == 0: + sil_tags.append((0, pos)) + else: + sil_tags.append((pos, pos)) + clip_start = pos + elif i - silence_start <= self.max_sil_kept * 2: + pos = rms_list[i - self.max_sil_kept : silence_start + self.max_sil_kept + 1].argmin() + pos += i - self.max_sil_kept + pos_l = rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start + pos_r = rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept + if silence_start == 0: + sil_tags.append((0, pos_r)) + clip_start = pos_r + else: + sil_tags.append((min(pos_l, pos), max(pos_r, pos))) + clip_start = max(pos_r, pos) + else: + pos_l = rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start + pos_r = rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept + if silence_start == 0: + sil_tags.append((0, pos_r)) + else: + sil_tags.append((pos_l, pos_r)) + clip_start = pos_r + silence_start = None + # Deal with trailing silence. + total_frames = rms_list.shape[0] + if silence_start is not None and total_frames - silence_start >= self.min_interval: + silence_end = min(total_frames, silence_start + self.max_sil_kept) + pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start + sil_tags.append((pos, total_frames + 1)) + # Apply and return slices. + ####音频+起始时间+终止时间 + if len(sil_tags) == 0: + return [[waveform, 0, int(total_frames * self.hop_size)]] + else: + chunks = [] + if sil_tags[0][0] > 0: + chunks.append([self._apply_slice(waveform, 0, sil_tags[0][0]), 0, int(sil_tags[0][0] * self.hop_size)]) + for i in range(len(sil_tags) - 1): + chunks.append( + [ + self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]), + int(sil_tags[i][1] * self.hop_size), + int(sil_tags[i + 1][0] * self.hop_size), + ] + ) + if sil_tags[-1][1] < total_frames: + chunks.append( + [ + self._apply_slice(waveform, sil_tags[-1][1], total_frames), + int(sil_tags[-1][1] * self.hop_size), + int(total_frames * self.hop_size), + ] + ) + return chunks + + +# terminal +def terminate_process_tree(pid, including_parent=True): + try: + parent = psutil.Process(pid) + except psutil.NoSuchProcess: + # Process already terminated + return + + children = parent.children(recursive=True) + for child in children: + try: + os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL + except OSError: + pass + if including_parent: + try: + os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL + except OSError: + pass + + +def terminate_process(pid): + if system == "Windows": + cmd = f"taskkill /t /f /pid {pid}" + os.system(cmd) + else: + terminate_process_tree(pid) + + +def start_training( + dataset_name="", + exp_name="F5TTS_v1_Base", + learning_rate=1e-5, + batch_size_per_gpu=1, + batch_size_type="sample", + max_samples=64, + grad_accumulation_steps=4, + max_grad_norm=1.0, + epochs=100, + num_warmup_updates=100, + save_per_updates=500, + keep_last_n_checkpoints=-1, + last_per_updates=100, + finetune=True, + file_checkpoint_train="", + tokenizer_type="pinyin", + tokenizer_file="", + mixed_precision="fp16", + stream=False, + logger="wandb", + ch_8bit_adam=False, +): + global training_process, tts_api, stop_signal + + if tts_api is not None: + if tts_api is not None: + del tts_api + + gc.collect() + torch.cuda.empty_cache() + tts_api = None + + path_project = os.path.join(path_data, dataset_name) + + if not os.path.isdir(path_project): + yield ( + f"There is not 
project with name {dataset_name}", + gr.update(interactive=True), + gr.update(interactive=False), + ) + return + + file_raw = os.path.join(path_project, "raw.arrow") + if not os.path.isfile(file_raw): + yield f"There is no file {file_raw}", gr.update(interactive=True), gr.update(interactive=False) + return + + # Check if a training process is already running + if training_process is not None: + return "Train run already!", gr.update(interactive=False), gr.update(interactive=True) + + yield "start train", gr.update(interactive=False), gr.update(interactive=False) + + # Command to run the training script with the specified arguments + + if tokenizer_file == "": + if dataset_name.endswith("_pinyin"): + tokenizer_type = "pinyin" + elif dataset_name.endswith("_char"): + tokenizer_type = "char" + else: + tokenizer_type = "custom" + + dataset_name = dataset_name.replace("_pinyin", "").replace("_char", "") + + if mixed_precision != "none": + fp16 = f"--mixed_precision={mixed_precision}" + else: + fp16 = "" + + cmd = ( + f"accelerate launch {fp16} {file_train} --exp_name {exp_name}" + f" --learning_rate {learning_rate}" + f" --batch_size_per_gpu {batch_size_per_gpu}" + f" --batch_size_type {batch_size_type}" + f" --max_samples {max_samples}" + f" --grad_accumulation_steps {grad_accumulation_steps}" + f" --max_grad_norm {max_grad_norm}" + f" --epochs {epochs}" + f" --num_warmup_updates {num_warmup_updates}" + f" --save_per_updates {save_per_updates}" + f" --keep_last_n_checkpoints {keep_last_n_checkpoints}" + f" --last_per_updates {last_per_updates}" + f" --dataset_name {dataset_name}" + ) + + if finetune: + cmd += " --finetune" + + if file_checkpoint_train != "": + cmd += f" --pretrain {file_checkpoint_train}" + + if tokenizer_file != "": + cmd += f" --tokenizer_path {tokenizer_file}" + + cmd += f" --tokenizer {tokenizer_type}" + + cmd += f" --log_samples --logger {logger}" + + if ch_8bit_adam: + cmd += " --bnb_optimizer" + + print("run command : \n" + cmd + "\n") + + save_settings( + dataset_name, + exp_name, + learning_rate, + batch_size_per_gpu, + batch_size_type, + max_samples, + grad_accumulation_steps, + max_grad_norm, + epochs, + num_warmup_updates, + save_per_updates, + keep_last_n_checkpoints, + last_per_updates, + finetune, + file_checkpoint_train, + tokenizer_type, + tokenizer_file, + mixed_precision, + logger, + ch_8bit_adam, + ) + + try: + if not stream: + # Start the training process + training_process = subprocess.Popen(cmd, shell=True) + + time.sleep(5) + yield "train start", gr.update(interactive=False), gr.update(interactive=True) + + # Wait for the training process to finish + training_process.wait() + else: + + def stream_output(pipe, output_queue): + try: + for line in iter(pipe.readline, ""): + output_queue.put(line) + except Exception as e: + output_queue.put(f"Error reading pipe: {str(e)}") + finally: + pipe.close() + + env = os.environ.copy() + env["PYTHONUNBUFFERED"] = "1" + + training_process = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, env=env + ) + yield "Training started...", gr.update(interactive=False), gr.update(interactive=True) + + stdout_queue = queue.Queue() + stderr_queue = queue.Queue() + + stdout_thread = threading.Thread(target=stream_output, args=(training_process.stdout, stdout_queue)) + stderr_thread = threading.Thread(target=stream_output, args=(training_process.stderr, stderr_queue)) + stdout_thread.daemon = True + stderr_thread.daemon = True + stdout_thread.start() + stderr_thread.start() 
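+            # Descriptive note: the reader threads above are daemons that push subprocess stdout/stderr
+            # lines into queues; the polling loop below drains them with get_nowait() so training
+            # progress can be streamed back to the UI without blocking on readline().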
+ stop_signal = False + while True: + if stop_signal: + training_process.terminate() + time.sleep(0.5) + if training_process.poll() is None: + training_process.kill() + yield "Training stopped by user.", gr.update(interactive=True), gr.update(interactive=False) + break + + process_status = training_process.poll() + + # Handle stdout + try: + while True: + output = stdout_queue.get_nowait() + print(output, end="") + match = re.search( + r"Epoch (\d+)/(\d+):\s+(\d+)%\|.*\[(\d+:\d+)<.*?loss=(\d+\.\d+), update=(\d+)", output + ) + if match: + current_epoch = match.group(1) + total_epochs = match.group(2) + percent_complete = match.group(3) + elapsed_time = match.group(4) + loss = match.group(5) + current_update = match.group(6) + message = ( + f"Epoch: {current_epoch}/{total_epochs}, " + f"Progress: {percent_complete}%, " + f"Elapsed Time: {elapsed_time}, " + f"Loss: {loss}, " + f"Update: {current_update}" + ) + yield message, gr.update(interactive=False), gr.update(interactive=True) + elif output.strip(): + yield output, gr.update(interactive=False), gr.update(interactive=True) + except queue.Empty: + pass + + # Handle stderr + try: + while True: + error_output = stderr_queue.get_nowait() + print(error_output, end="") + if error_output.strip(): + yield f"{error_output.strip()}", gr.update(interactive=False), gr.update(interactive=True) + except queue.Empty: + pass + + if process_status is not None and stdout_queue.empty() and stderr_queue.empty(): + if process_status != 0: + yield ( + f"Process crashed with exit code {process_status}!", + gr.update(interactive=False), + gr.update(interactive=True), + ) + else: + yield "Training complete!", gr.update(interactive=False), gr.update(interactive=True) + break + + # Small sleep to prevent CPU thrashing + time.sleep(0.1) + + # Clean up + training_process.stdout.close() + training_process.stderr.close() + training_process.wait() + + time.sleep(1) + + if training_process is None: + text_info = "train stop" + else: + text_info = "train complete !" 
+ + except Exception as e: # Catch all exceptions + # Ensure that we reset the training process variable in case of an error + text_info = f"An error occurred: {str(e)}" + + training_process = None + + yield text_info, gr.update(interactive=True), gr.update(interactive=False) + + +def stop_training(): + global training_process, stop_signal + + if training_process is None: + return "Train not run !", gr.update(interactive=True), gr.update(interactive=False) + terminate_process_tree(training_process.pid) + # training_process = None + stop_signal = True + return "train stop", gr.update(interactive=True), gr.update(interactive=False) + + +def get_list_projects(): + project_list = [] + for folder in os.listdir(path_data): + path_folder = os.path.join(path_data, folder) + if not os.path.isdir(path_folder): + continue + folder = folder.lower() + if folder == "emilia_zh_en_pinyin": + continue + project_list.append(folder) + + projects_selelect = None if not project_list else project_list[-1] + + return project_list, projects_selelect + + +def create_data_project(name, tokenizer_type): + name += "_" + tokenizer_type + os.makedirs(os.path.join(path_data, name), exist_ok=True) + os.makedirs(os.path.join(path_data, name, "dataset"), exist_ok=True) + project_list, projects_selelect = get_list_projects() + return gr.update(choices=project_list, value=name) + + +def transcribe_all(name_project, audio_files, language, user=False, progress=gr.Progress()): + path_project = os.path.join(path_data, name_project) + path_dataset = os.path.join(path_project, "dataset") + path_project_wavs = os.path.join(path_project, "wavs") + file_metadata = os.path.join(path_project, "metadata.csv") + + if not user: + if audio_files is None: + return "You need to load an audio file." + + if os.path.isdir(path_project_wavs): + shutil.rmtree(path_project_wavs) + + if os.path.isfile(file_metadata): + os.remove(file_metadata) + + os.makedirs(path_project_wavs, exist_ok=True) + + if user: + file_audios = [ + file + for format in ("*.wav", "*.ogg", "*.opus", "*.mp3", "*.flac") + for file in glob(os.path.join(path_dataset, format)) + ] + if file_audios == []: + return "No audio file was found in the dataset." 
+ else: + file_audios = audio_files + + alpha = 0.5 + _max = 1.0 + slicer = Slicer(24000) + + num = 0 + error_num = 0 + data = "" + for file_audio in progress.tqdm(file_audios, desc="transcribe files", total=len((file_audios))): + audio, _ = librosa.load(file_audio, sr=24000, mono=True) + + list_slicer = slicer.slice(audio) + for chunk, start, end in progress.tqdm(list_slicer, total=len(list_slicer), desc="slicer files"): + name_segment = os.path.join(f"segment_{num}") + file_segment = os.path.join(path_project_wavs, f"{name_segment}.wav") + + tmp_max = np.abs(chunk).max() + if tmp_max > 1: + chunk /= tmp_max + chunk = (chunk / tmp_max * (_max * alpha)) + (1 - alpha) * chunk + wavfile.write(file_segment, 24000, (chunk * 32767).astype(np.int16)) + + try: + text = transcribe(file_segment, language) + text = text.lower().strip().replace('"', "") + + data += f"{name_segment}|{text}\n" + + num += 1 + except: # noqa: E722 + error_num += 1 + + with open(file_metadata, "w", encoding="utf-8-sig") as f: + f.write(data) + + if error_num != []: + error_text = f"\nerror files : {error_num}" + else: + error_text = "" + + return f"transcribe complete samples : {num}\npath : {path_project_wavs}{error_text}" + + +def format_seconds_to_hms(seconds): + hours = int(seconds / 3600) + minutes = int((seconds % 3600) / 60) + seconds = seconds % 60 + return "{:02d}:{:02d}:{:02d}".format(hours, minutes, int(seconds)) + + +def get_correct_audio_path( + audio_input, + base_path="wavs", + supported_formats=("wav", "mp3", "aac", "flac", "m4a", "alac", "ogg", "aiff", "wma", "amr"), +): + file_audio = None + + # Helper function to check if file has a supported extension + def has_supported_extension(file_name): + return any(file_name.endswith(f".{ext}") for ext in supported_formats) + + # Case 1: If it's a full path with a valid extension, use it directly + if os.path.isabs(audio_input) and has_supported_extension(audio_input): + file_audio = audio_input + + # Case 2: If it has a supported extension but is not a full path + elif has_supported_extension(audio_input) and not os.path.isabs(audio_input): + file_audio = os.path.join(base_path, audio_input) + + # Case 3: If only the name is given (no extension and not a full path) + elif not has_supported_extension(audio_input) and not os.path.isabs(audio_input): + for ext in supported_formats: + potential_file = os.path.join(base_path, f"{audio_input}.{ext}") + if os.path.exists(potential_file): + file_audio = potential_file + break + else: + file_audio = os.path.join(base_path, f"{audio_input}.{supported_formats[0]}") + return file_audio + + +def create_metadata(name_project, ch_tokenizer, progress=gr.Progress()): + path_project = os.path.join(path_data, name_project) + path_project_wavs = os.path.join(path_project, "wavs") + file_metadata = os.path.join(path_project, "metadata.csv") + file_raw = os.path.join(path_project, "raw.arrow") + file_duration = os.path.join(path_project, "duration.json") + file_vocab = os.path.join(path_project, "vocab.txt") + + if not os.path.isfile(file_metadata): + return "The file was not found in " + file_metadata, "" + + with open(file_metadata, "r", encoding="utf-8-sig") as f: + data = f.read() + + audio_path_list = [] + text_list = [] + duration_list = [] + + count = data.split("\n") + lenght = 0 + result = [] + error_files = [] + text_vocab_set = set() + for line in progress.tqdm(data.split("\n"), total=count): + sp_line = line.split("|") + if len(sp_line) != 2: + continue + name_audio, text = sp_line[:2] + + file_audio = 
get_correct_audio_path(name_audio, path_project_wavs) + + if not os.path.isfile(file_audio): + error_files.append([file_audio, "error path"]) + continue + + try: + duration = get_audio_duration(file_audio) + except Exception as e: + error_files.append([file_audio, "duration"]) + print(f"Error processing {file_audio}: {e}") + continue + + if duration < 1 or duration > 30: + if duration > 30: + error_files.append([file_audio, "duration > 30 sec"]) + if duration < 1: + error_files.append([file_audio, "duration < 1 sec "]) + continue + if len(text) < 3: + error_files.append([file_audio, "very short text length 3"]) + continue + + text = clear_text(text) + text = convert_char_to_pinyin([text], polyphone=True)[0] + + audio_path_list.append(file_audio) + duration_list.append(duration) + text_list.append(text) + + result.append({"audio_path": file_audio, "text": text, "duration": duration}) + if ch_tokenizer: + text_vocab_set.update(list(text)) + + lenght += duration + + if duration_list == []: + return f"Error: No audio files found in the specified path : {path_project_wavs}", "" + + min_second = round(min(duration_list), 2) + max_second = round(max(duration_list), 2) + + with ArrowWriter(path=file_raw, writer_batch_size=1) as writer: + for line in progress.tqdm(result, total=len(result), desc="prepare data"): + writer.write(line) + + with open(file_duration, "w") as f: + json.dump({"duration": duration_list}, f, ensure_ascii=False) + + new_vocal = "" + if not ch_tokenizer: + if not os.path.isfile(file_vocab): + file_vocab_finetune = os.path.join(path_data, "Emilia_ZH_EN_pinyin/vocab.txt") + if not os.path.isfile(file_vocab_finetune): + return "Error: Vocabulary file 'Emilia_ZH_EN_pinyin' not found!", "" + shutil.copy2(file_vocab_finetune, file_vocab) + + with open(file_vocab, "r", encoding="utf-8-sig") as f: + vocab_char_map = {} + for i, char in enumerate(f): + vocab_char_map[char[:-1]] = i + vocab_size = len(vocab_char_map) + + else: + with open(file_vocab, "w", encoding="utf-8-sig") as f: + for vocab in sorted(text_vocab_set): + f.write(vocab + "\n") + new_vocal += vocab + "\n" + vocab_size = len(text_vocab_set) + + if error_files != []: + error_text = "\n".join([" = ".join(item) for item in error_files]) + else: + error_text = "" + + return ( + f"prepare complete \nsamples : {len(text_list)}\ntime data : {format_seconds_to_hms(lenght)}\nmin sec : {min_second}\nmax sec : {max_second}\nfile_arrow : {file_raw}\nvocab : {vocab_size}\n{error_text}", + new_vocal, + ) + + +def check_user(value): + return gr.update(visible=not value), gr.update(visible=value) + + +def calculate_train( + name_project, + epochs, + learning_rate, + batch_size_per_gpu, + batch_size_type, + max_samples, + num_warmup_updates, + finetune, +): + path_project = os.path.join(path_data, name_project) + file_duration = os.path.join(path_project, "duration.json") + + hop_length = 256 + sampling_rate = 24000 + + if not os.path.isfile(file_duration): + return ( + epochs, + learning_rate, + batch_size_per_gpu, + max_samples, + num_warmup_updates, + "project not found !", + ) + + with open(file_duration, "r") as file: + data = json.load(file) + + duration_list = data["duration"] + max_sample_length = max(duration_list) * sampling_rate / hop_length + total_samples = len(duration_list) + total_duration = sum(duration_list) + + if torch.cuda.is_available(): + gpu_count = torch.cuda.device_count() + total_memory = 0 + for i in range(gpu_count): + gpu_properties = torch.cuda.get_device_properties(i) + total_memory += 
gpu_properties.total_memory / (1024**3) # in GB + elif torch.xpu.is_available(): + gpu_count = torch.xpu.device_count() + total_memory = 0 + for i in range(gpu_count): + gpu_properties = torch.xpu.get_device_properties(i) + total_memory += gpu_properties.total_memory / (1024**3) + elif torch.backends.mps.is_available(): + gpu_count = 1 + total_memory = psutil.virtual_memory().available / (1024**3) + + avg_gpu_memory = total_memory / gpu_count + + # rough estimate of batch size + if batch_size_type == "frame": + batch_size_per_gpu = max(int(38400 * (avg_gpu_memory - 5) / 75), int(max_sample_length)) + elif batch_size_type == "sample": + batch_size_per_gpu = int(200 / (total_duration / total_samples)) + + if total_samples < 64: + max_samples = int(total_samples * 0.25) + + num_warmup_updates = max(num_warmup_updates, int(total_samples * 0.05)) + + # take 1.2M updates as the maximum + max_updates = 1200000 + + if batch_size_type == "frame": + mini_batch_duration = batch_size_per_gpu * gpu_count * hop_length / sampling_rate + updates_per_epoch = total_duration / mini_batch_duration + elif batch_size_type == "sample": + updates_per_epoch = total_samples / batch_size_per_gpu / gpu_count + + epochs = int(max_updates / updates_per_epoch) + + if finetune: + learning_rate = 1e-5 + else: + learning_rate = 7.5e-5 + + return ( + epochs, + learning_rate, + batch_size_per_gpu, + max_samples, + num_warmup_updates, + total_samples, + ) + + +def extract_and_save_ema_model(checkpoint_path: str, new_checkpoint_path: str, safetensors: bool) -> str: + try: + checkpoint = torch.load(checkpoint_path, weights_only=True) + print("Original Checkpoint Keys:", checkpoint.keys()) + + ema_model_state_dict = checkpoint.get("ema_model_state_dict", None) + if ema_model_state_dict is None: + return "No 'ema_model_state_dict' found in the checkpoint." 
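+        # Keep only the EMA weights; optimizer state and training metadata are dropped, which is what shrinks the checkpoint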
+ + if safetensors: + new_checkpoint_path = new_checkpoint_path.replace(".pt", ".safetensors") + save_file(ema_model_state_dict, new_checkpoint_path) + else: + new_checkpoint_path = new_checkpoint_path.replace(".safetensors", ".pt") + new_checkpoint = {"ema_model_state_dict": ema_model_state_dict} + torch.save(new_checkpoint, new_checkpoint_path) + + return f"New checkpoint saved at: {new_checkpoint_path}" + + except Exception as e: + return f"An error occurred: {e}" + + +def expand_model_embeddings(ckpt_path, new_ckpt_path, num_new_tokens=42): + seed = 666 + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + if ckpt_path.endswith(".safetensors"): + ckpt = load_file(ckpt_path, device="cpu") + ckpt = {"ema_model_state_dict": ckpt} + elif ckpt_path.endswith(".pt"): + ckpt = torch.load(ckpt_path, map_location="cpu") + + ema_sd = ckpt.get("ema_model_state_dict", {}) + embed_key_ema = "ema_model.transformer.text_embed.text_embed.weight" + old_embed_ema = ema_sd[embed_key_ema] + + vocab_old = old_embed_ema.size(0) + embed_dim = old_embed_ema.size(1) + vocab_new = vocab_old + num_new_tokens + + def expand_embeddings(old_embeddings): + new_embeddings = torch.zeros((vocab_new, embed_dim)) + new_embeddings[:vocab_old] = old_embeddings + new_embeddings[vocab_old:] = torch.randn((num_new_tokens, embed_dim)) + return new_embeddings + + ema_sd[embed_key_ema] = expand_embeddings(ema_sd[embed_key_ema]) + + torch.save(ckpt, new_ckpt_path) + + return vocab_new + + +def vocab_count(text): + return str(len(text.split(","))) + + +def vocab_extend(project_name, symbols, model_type): + if symbols == "": + return "Symbols empty!" + + name_project = project_name + path_project = os.path.join(path_data, name_project) + file_vocab_project = os.path.join(path_project, "vocab.txt") + + file_vocab = os.path.join(path_data, "Emilia_ZH_EN_pinyin/vocab.txt") + if not os.path.isfile(file_vocab): + return f"the file {file_vocab} not found !" + + symbols = symbols.split(",") + if symbols == []: + return "Symbols to extend not found." + + with open(file_vocab, "r", encoding="utf-8-sig") as f: + data = f.read() + vocab = data.split("\n") + vocab_check = set(vocab) + + miss_symbols = [] + for item in symbols: + item = item.replace(" ", "") + if item in vocab_check: + continue + miss_symbols.append(item) + + if miss_symbols == []: + return "Symbols are okay no need to extend." 
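+    # Append the missing symbols to the pretrained vocab, then expand the model's text embedding table to match the new size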
+ + size_vocab = len(vocab) + vocab.pop() + for item in miss_symbols: + vocab.append(item) + + vocab.append("") + + with open(file_vocab_project, "w", encoding="utf-8") as f: + f.write("\n".join(vocab)) + + if model_type == "F5TTS_v1_Base": + ckpt_path = str(cached_path("hf://SWivid/F5-TTS/F5TTS_v1_Base/model_1250000.safetensors")) + elif model_type == "F5TTS_Base": + ckpt_path = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.pt")) + elif model_type == "E2TTS_Base": + ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.pt")) + + vocab_size_new = len(miss_symbols) + + dataset_name = name_project.replace("_pinyin", "").replace("_char", "") + new_ckpt_path = os.path.join(path_project_ckpts, dataset_name) + os.makedirs(new_ckpt_path, exist_ok=True) + + # Add pretrained_ prefix to model when copying for consistency with finetune_cli.py + new_ckpt_file = os.path.join(new_ckpt_path, "pretrained_" + os.path.basename(ckpt_path)) + + size = expand_model_embeddings(ckpt_path, new_ckpt_file, num_new_tokens=vocab_size_new) + + vocab_new = "\n".join(miss_symbols) + return f"vocab old size : {size_vocab}\nvocab new size : {size}\nvocab add : {vocab_size_new}\nnew symbols :\n{vocab_new}" + + +def vocab_check(project_name): + name_project = project_name + path_project = os.path.join(path_data, name_project) + + file_metadata = os.path.join(path_project, "metadata.csv") + + file_vocab = os.path.join(path_data, "Emilia_ZH_EN_pinyin/vocab.txt") + if not os.path.isfile(file_vocab): + return f"the file {file_vocab} not found !", "" + + with open(file_vocab, "r", encoding="utf-8-sig") as f: + data = f.read() + vocab = data.split("\n") + vocab = set(vocab) + + if not os.path.isfile(file_metadata): + return f"the file {file_metadata} not found !", "" + + with open(file_metadata, "r", encoding="utf-8-sig") as f: + data = f.read() + + miss_symbols = [] + miss_symbols_keep = {} + for item in data.split("\n"): + sp = item.split("|") + if len(sp) != 2: + continue + + text = sp[1].lower().strip() + + for t in text: + if t not in vocab and t not in miss_symbols_keep: + miss_symbols.append(t) + miss_symbols_keep[t] = t + + if miss_symbols == []: + vocab_miss = "" + info = "You can train using your language !" 
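+    # Otherwise, report every character in metadata.csv that the pretrained vocab does not cover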
+ else: + vocab_miss = ",".join(miss_symbols) + info = f"The following symbols are missing in your language {len(miss_symbols)}\n\n" + + return info, vocab_miss + + +def get_random_sample_prepare(project_name): + name_project = project_name + path_project = os.path.join(path_data, name_project) + file_arrow = os.path.join(path_project, "raw.arrow") + if not os.path.isfile(file_arrow): + return "", None + dataset = Dataset_.from_file(file_arrow) + random_sample = dataset.shuffle(seed=random.randint(0, 1000)).select([0]) + text = "[" + " , ".join(["' " + t + " '" for t in random_sample["text"][0]]) + "]" + audio_path = random_sample["audio_path"][0] + return text, audio_path + + +def get_random_sample_transcribe(project_name): + name_project = project_name + path_project = os.path.join(path_data, name_project) + file_metadata = os.path.join(path_project, "metadata.csv") + if not os.path.isfile(file_metadata): + return "", None + + data = "" + with open(file_metadata, "r", encoding="utf-8-sig") as f: + data = f.read() + + list_data = [] + for item in data.split("\n"): + sp = item.split("|") + if len(sp) != 2: + continue + + # fixed audio when it is absolute + file_audio = get_correct_audio_path(sp[0], os.path.join(path_project, "wavs")) + list_data.append([file_audio, sp[1]]) + + if list_data == []: + return "", None + + random_item = random.choice(list_data) + + return random_item[1], random_item[0] + + +def get_random_sample_infer(project_name): + text, audio = get_random_sample_transcribe(project_name) + return ( + text, + text, + audio, + ) + + +def infer( + project, file_checkpoint, exp_name, ref_text, ref_audio, gen_text, nfe_step, use_ema, speed, seed, remove_silence +): + global last_checkpoint, last_device, tts_api, last_ema + + if not os.path.isfile(file_checkpoint): + return None, "checkpoint not found!" 
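+    # Run inference on CPU while a training process is active, so it does not compete with training for GPU memory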
+ + if training_process is not None: + device_test = "cpu" + else: + device_test = None + + if last_checkpoint != file_checkpoint or last_device != device_test or last_ema != use_ema or tts_api is None: + if last_checkpoint != file_checkpoint: + last_checkpoint = file_checkpoint + + if last_device != device_test: + last_device = device_test + + if last_ema != use_ema: + last_ema = use_ema + + vocab_file = os.path.join(path_data, project, "vocab.txt") + + tts_api = F5TTS( + model=exp_name, ckpt_file=file_checkpoint, vocab_file=vocab_file, device=device_test, use_ema=use_ema + ) + + print("update >> ", device_test, file_checkpoint, use_ema) + + with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f: + tts_api.infer( + ref_file=ref_audio, + ref_text=ref_text.lower().strip(), + gen_text=gen_text.lower().strip(), + nfe_step=nfe_step, + speed=speed, + remove_silence=remove_silence, + file_wave=f.name, + seed=seed, + ) + return f.name, tts_api.device, str(tts_api.seed) + + +def check_finetune(finetune): + return gr.update(interactive=finetune), gr.update(interactive=finetune), gr.update(interactive=finetune) + + +def get_checkpoints_project(project_name, is_gradio=True): + if project_name is None: + return [], "" + project_name = project_name.replace("_pinyin", "").replace("_char", "") + + if os.path.isdir(path_project_ckpts): + files_checkpoints = glob(os.path.join(path_project_ckpts, project_name, "*.pt")) + # Separate pretrained and regular checkpoints + pretrained_checkpoints = [f for f in files_checkpoints if "pretrained_" in os.path.basename(f)] + regular_checkpoints = [ + f + for f in files_checkpoints + if "pretrained_" not in os.path.basename(f) and "model_last.pt" not in os.path.basename(f) + ] + last_checkpoint = [f for f in files_checkpoints if "model_last.pt" in os.path.basename(f)] + + # Sort regular checkpoints by number + regular_checkpoints = sorted( + regular_checkpoints, key=lambda x: int(os.path.basename(x).split("_")[1].split(".")[0]) + ) + + # Combine in order: pretrained, regular, last + files_checkpoints = pretrained_checkpoints + regular_checkpoints + last_checkpoint + else: + files_checkpoints = [] + + selelect_checkpoint = None if not files_checkpoints else files_checkpoints[0] + + if is_gradio: + return gr.update(choices=files_checkpoints, value=selelect_checkpoint) + + return files_checkpoints, selelect_checkpoint + + +def get_audio_project(project_name, is_gradio=True): + if project_name is None: + return [], "" + project_name = project_name.replace("_pinyin", "").replace("_char", "") + + if os.path.isdir(path_project_ckpts): + files_audios = glob(os.path.join(path_project_ckpts, project_name, "samples", "*.wav")) + files_audios = sorted(files_audios, key=lambda x: int(os.path.basename(x).split("_")[1].split(".")[0])) + + files_audios = [item.replace("_gen.wav", "") for item in files_audios if item.endswith("_gen.wav")] + else: + files_audios = [] + + selelect_checkpoint = None if not files_audios else files_audios[0] + + if is_gradio: + return gr.update(choices=files_audios, value=selelect_checkpoint) + + return files_audios, selelect_checkpoint + + +def get_gpu_stats(): + gpu_stats = "" + + if torch.cuda.is_available(): + gpu_count = torch.cuda.device_count() + for i in range(gpu_count): + gpu_name = torch.cuda.get_device_name(i) + gpu_properties = torch.cuda.get_device_properties(i) + total_memory = gpu_properties.total_memory / (1024**3) # in GB + allocated_memory = torch.cuda.memory_allocated(i) / (1024**2) # in MB + reserved_memory = 
torch.cuda.memory_reserved(i) / (1024**2) # in MB + + gpu_stats += ( + f"GPU {i} Name: {gpu_name}\n" + f"Total GPU memory (GPU {i}): {total_memory:.2f} GB\n" + f"Allocated GPU memory (GPU {i}): {allocated_memory:.2f} MB\n" + f"Reserved GPU memory (GPU {i}): {reserved_memory:.2f} MB\n\n" + ) + elif torch.xpu.is_available(): + gpu_count = torch.xpu.device_count() + for i in range(gpu_count): + gpu_name = torch.xpu.get_device_name(i) + gpu_properties = torch.xpu.get_device_properties(i) + total_memory = gpu_properties.total_memory / (1024**3) # in GB + allocated_memory = torch.xpu.memory_allocated(i) / (1024**2) # in MB + reserved_memory = torch.xpu.memory_reserved(i) / (1024**2) # in MB + + gpu_stats += ( + f"GPU {i} Name: {gpu_name}\n" + f"Total GPU memory (GPU {i}): {total_memory:.2f} GB\n" + f"Allocated GPU memory (GPU {i}): {allocated_memory:.2f} MB\n" + f"Reserved GPU memory (GPU {i}): {reserved_memory:.2f} MB\n\n" + ) + elif torch.backends.mps.is_available(): + gpu_count = 1 + gpu_stats += "MPS GPU\n" + total_memory = psutil.virtual_memory().total / ( + 1024**3 + ) # Total system memory (MPS doesn't have its own memory) + allocated_memory = 0 + reserved_memory = 0 + + gpu_stats += ( + f"Total system memory: {total_memory:.2f} GB\n" + f"Allocated GPU memory (MPS): {allocated_memory:.2f} MB\n" + f"Reserved GPU memory (MPS): {reserved_memory:.2f} MB\n" + ) + + else: + gpu_stats = "No GPU available" + + return gpu_stats + + +def get_cpu_stats(): + cpu_usage = psutil.cpu_percent(interval=1) + memory_info = psutil.virtual_memory() + memory_used = memory_info.used / (1024**2) + memory_total = memory_info.total / (1024**2) + memory_percent = memory_info.percent + + pid = os.getpid() + process = psutil.Process(pid) + nice_value = process.nice() + + cpu_stats = ( + f"CPU Usage: {cpu_usage:.2f}%\n" + f"System Memory: {memory_used:.2f} MB used / {memory_total:.2f} MB total ({memory_percent}% used)\n" + f"Process Priority (Nice value): {nice_value}" + ) + + return cpu_stats + + +def get_combined_stats(): + gpu_stats = get_gpu_stats() + cpu_stats = get_cpu_stats() + combined_stats = f"### GPU Stats\n{gpu_stats}\n\n### CPU Stats\n{cpu_stats}" + return combined_stats + + +def get_audio_select(file_sample): + select_audio_ref = file_sample + select_audio_gen = file_sample + + if file_sample is not None: + select_audio_ref += "_ref.wav" + select_audio_gen += "_gen.wav" + + return select_audio_ref, select_audio_gen + + +with gr.Blocks() as app: + gr.Markdown( + """ +# F5 TTS Automatic Finetune + +This is a local web UI for F5 TTS finetuning support. This app supports the following TTS models: + +* [F5-TTS](https://arxiv.org/abs/2410.06885) (A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching) +* [E2 TTS](https://arxiv.org/abs/2406.18009) (Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS) + +The pretrained checkpoints support English and Chinese. 
+ +For tutorial and updates check here (https://github.com/SWivid/F5-TTS/discussions/143) +""" + ) + + with gr.Row(): + projects, projects_selelect = get_list_projects() + tokenizer_type = gr.Radio(label="Tokenizer Type", choices=["pinyin", "char", "custom"], value="pinyin") + project_name = gr.Textbox(label="Project Name", value="my_speak") + bt_create = gr.Button("Create a New Project") + + with gr.Row(): + cm_project = gr.Dropdown( + choices=projects, value=projects_selelect, label="Project", allow_custom_value=True, scale=6 + ) + ch_refresh_project = gr.Button("Refresh", scale=1) + + bt_create.click(fn=create_data_project, inputs=[project_name, tokenizer_type], outputs=[cm_project]) + + with gr.Tabs(): + with gr.TabItem("Transcribe Data"): + gr.Markdown("""```plaintext +Skip this step if you have your dataset, metadata.csv, and a folder wavs with all the audio files. +```""") + + ch_manual = gr.Checkbox(label="Audio from Path", value=False) + + mark_info_transcribe = gr.Markdown( + """```plaintext + Place your 'wavs' folder and 'metadata.csv' file in the '{your_project_name}' directory. + + my_speak/ + │ + └── dataset/ + ├── audio1.wav + └── audio2.wav + ... + ```""", + visible=False, + ) + + audio_speaker = gr.File(label="Voice", type="filepath", file_count="multiple") + txt_lang = gr.Text(label="Language", value="English") + bt_transcribe = bt_create = gr.Button("Transcribe") + txt_info_transcribe = gr.Text(label="Info", value="") + bt_transcribe.click( + fn=transcribe_all, + inputs=[cm_project, audio_speaker, txt_lang, ch_manual], + outputs=[txt_info_transcribe], + ) + ch_manual.change(fn=check_user, inputs=[ch_manual], outputs=[audio_speaker, mark_info_transcribe]) + + random_sample_transcribe = gr.Button("Random Sample") + + with gr.Row(): + random_text_transcribe = gr.Text(label="Text") + random_audio_transcribe = gr.Audio(label="Audio", type="filepath") + + random_sample_transcribe.click( + fn=get_random_sample_transcribe, + inputs=[cm_project], + outputs=[random_text_transcribe, random_audio_transcribe], + ) + + with gr.TabItem("Vocab Check"): + gr.Markdown("""```plaintext +Check the vocabulary for fine-tuning Emilia_ZH_EN to ensure all symbols are included. For fine-tuning a new language. +```""") + + check_button = gr.Button("Check Vocab") + txt_info_check = gr.Text(label="Info", value="") + + gr.Markdown("""```plaintext +Using the extended model, you can finetune to a new language that is missing symbols in the vocab. This creates a new model with a new vocabulary size and saves it in your ckpts/project folder. 
+```""") + + exp_name_extend = gr.Radio( + label="Model", choices=["F5TTS_v1_Base", "F5TTS_Base", "E2TTS_Base"], value="F5TTS_v1_Base" + ) + + with gr.Row(): + txt_extend = gr.Textbox( + label="Symbols", + value="", + placeholder="To add new symbols, make sure to use ',' for each symbol", + scale=6, + ) + txt_count_symbol = gr.Textbox(label="New Vocab Size", value="", scale=1) + + extend_button = gr.Button("Extend") + txt_info_extend = gr.Text(label="Info", value="") + + txt_extend.change(vocab_count, inputs=[txt_extend], outputs=[txt_count_symbol]) + check_button.click(fn=vocab_check, inputs=[cm_project], outputs=[txt_info_check, txt_extend]) + extend_button.click( + fn=vocab_extend, inputs=[cm_project, txt_extend, exp_name_extend], outputs=[txt_info_extend] + ) + + with gr.TabItem("Prepare Data"): + gr.Markdown("""```plaintext +Skip this step if you have your dataset, raw.arrow, duration.json, and vocab.txt +```""") + + gr.Markdown( + """```plaintext + Place all your "wavs" folder and your "metadata.csv" file in your project name directory. + + Supported audio formats: "wav", "mp3", "aac", "flac", "m4a", "alac", "ogg", "aiff", "wma", "amr" + + Example wav format: + my_speak/ + │ + ├── wavs/ + │ ├── audio1.wav + │ └── audio2.wav + | ... + │ + └── metadata.csv + + File format metadata.csv: + + audio1|text1 or audio1.wav|text1 or your_path/audio1.wav|text1 + audio2|text1 or audio2.wav|text1 or your_path/audio2.wav|text1 + ... + + ```""" + ) + ch_tokenizern = gr.Checkbox(label="Create Vocabulary", value=False, visible=False) + + bt_prepare = bt_create = gr.Button("Prepare") + txt_info_prepare = gr.Text(label="Info", value="") + txt_vocab_prepare = gr.Text(label="Vocab", value="") + + bt_prepare.click( + fn=create_metadata, inputs=[cm_project, ch_tokenizern], outputs=[txt_info_prepare, txt_vocab_prepare] + ) + + random_sample_prepare = gr.Button("Random Sample") + + with gr.Row(): + random_text_prepare = gr.Text(label="Tokenizer") + random_audio_prepare = gr.Audio(label="Audio", type="filepath") + + random_sample_prepare.click( + fn=get_random_sample_prepare, inputs=[cm_project], outputs=[random_text_prepare, random_audio_prepare] + ) + + with gr.TabItem("Train Model"): + gr.Markdown("""```plaintext +The auto-setting is still experimental. Set a large value of epoch if not sure; and keep last N checkpoints if limited disk space. +If you encounter a memory error, try reducing the batch size per GPU to a smaller number. 
+```""") + with gr.Row(): + bt_calculate = bt_create = gr.Button("Auto Settings") + lb_samples = gr.Label(label="Samples") + batch_size_type = gr.Radio(label="Batch Size Type", choices=["frame", "sample"], value="frame") + + with gr.Row(): + ch_finetune = bt_create = gr.Checkbox(label="Finetune", value=True) + tokenizer_file = gr.Textbox(label="Tokenizer File", value="") + file_checkpoint_train = gr.Textbox(label="Path to the Pretrained Checkpoint", value="") + + with gr.Row(): + exp_name = gr.Radio( + label="Model", choices=["F5TTS_v1_Base", "F5TTS_Base", "E2TTS_Base"], value="F5TTS_v1_Base" + ) + learning_rate = gr.Number(label="Learning Rate", value=1e-5, step=1e-5) + + with gr.Row(): + batch_size_per_gpu = gr.Number(label="Batch Size per GPU", value=3200) + max_samples = gr.Number(label="Max Samples", value=64) + + with gr.Row(): + grad_accumulation_steps = gr.Number(label="Gradient Accumulation Steps", value=1) + max_grad_norm = gr.Number(label="Max Gradient Norm", value=1.0) + + with gr.Row(): + epochs = gr.Number(label="Epochs", value=100) + num_warmup_updates = gr.Number(label="Warmup Updates", value=100) + + with gr.Row(): + save_per_updates = gr.Number(label="Save per Updates", value=500) + keep_last_n_checkpoints = gr.Number( + label="Keep Last N Checkpoints", + value=-1, + step=1, + precision=0, + info="-1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints", + ) + last_per_updates = gr.Number(label="Last per Updates", value=100) + + with gr.Row(): + ch_8bit_adam = gr.Checkbox(label="Use 8-bit Adam optimizer") + mixed_precision = gr.Radio(label="mixed_precision", choices=["none", "fp16", "bf16"], value="fp16") + cd_logger = gr.Radio(label="logger", choices=["wandb", "tensorboard"], value="wandb") + start_button = gr.Button("Start Training") + stop_button = gr.Button("Stop Training", interactive=False) + + if projects_selelect is not None: + ( + exp_name_value, + learning_rate_value, + batch_size_per_gpu_value, + batch_size_type_value, + max_samples_value, + grad_accumulation_steps_value, + max_grad_norm_value, + epochs_value, + num_warmup_updates_value, + save_per_updates_value, + keep_last_n_checkpoints_value, + last_per_updates_value, + finetune_value, + file_checkpoint_train_value, + tokenizer_type_value, + tokenizer_file_value, + mixed_precision_value, + logger_value, + bnb_optimizer_value, + ) = load_settings(projects_selelect) + + # Assigning values to the respective components + exp_name.value = exp_name_value + learning_rate.value = learning_rate_value + batch_size_per_gpu.value = batch_size_per_gpu_value + batch_size_type.value = batch_size_type_value + max_samples.value = max_samples_value + grad_accumulation_steps.value = grad_accumulation_steps_value + max_grad_norm.value = max_grad_norm_value + epochs.value = epochs_value + num_warmup_updates.value = num_warmup_updates_value + save_per_updates.value = save_per_updates_value + keep_last_n_checkpoints.value = keep_last_n_checkpoints_value + last_per_updates.value = last_per_updates_value + ch_finetune.value = finetune_value + file_checkpoint_train.value = file_checkpoint_train_value + tokenizer_type.value = tokenizer_type_value + tokenizer_file.value = tokenizer_file_value + mixed_precision.value = mixed_precision_value + cd_logger.value = logger_value + ch_8bit_adam.value = bnb_optimizer_value + + ch_stream = gr.Checkbox(label="Stream Output Experiment", value=True) + txt_info_train = gr.Text(label="Info", value="") + + list_audios, select_audio = get_audio_project(projects_selelect, False) + + 
select_audio_ref = select_audio + select_audio_gen = select_audio + + if select_audio is not None: + select_audio_ref += "_ref.wav" + select_audio_gen += "_gen.wav" + + with gr.Row(): + ch_list_audio = gr.Dropdown( + choices=list_audios, + value=select_audio, + label="Audios", + allow_custom_value=True, + scale=6, + interactive=True, + ) + bt_stream_audio = gr.Button("Refresh", scale=1) + bt_stream_audio.click(fn=get_audio_project, inputs=[cm_project], outputs=[ch_list_audio]) + cm_project.change(fn=get_audio_project, inputs=[cm_project], outputs=[ch_list_audio]) + + with gr.Row(): + audio_ref_stream = gr.Audio(label="Original", type="filepath", value=select_audio_ref) + audio_gen_stream = gr.Audio(label="Generate", type="filepath", value=select_audio_gen) + + ch_list_audio.change( + fn=get_audio_select, + inputs=[ch_list_audio], + outputs=[audio_ref_stream, audio_gen_stream], + ) + + start_button.click( + fn=start_training, + inputs=[ + cm_project, + exp_name, + learning_rate, + batch_size_per_gpu, + batch_size_type, + max_samples, + grad_accumulation_steps, + max_grad_norm, + epochs, + num_warmup_updates, + save_per_updates, + keep_last_n_checkpoints, + last_per_updates, + ch_finetune, + file_checkpoint_train, + tokenizer_type, + tokenizer_file, + mixed_precision, + ch_stream, + cd_logger, + ch_8bit_adam, + ], + outputs=[txt_info_train, start_button, stop_button], + ) + stop_button.click(fn=stop_training, outputs=[txt_info_train, start_button, stop_button]) + + bt_calculate.click( + fn=calculate_train, + inputs=[ + cm_project, + epochs, + learning_rate, + batch_size_per_gpu, + batch_size_type, + max_samples, + num_warmup_updates, + ch_finetune, + ], + outputs=[ + epochs, + learning_rate, + batch_size_per_gpu, + max_samples, + num_warmup_updates, + lb_samples, + ], + ) + + ch_finetune.change( + check_finetune, inputs=[ch_finetune], outputs=[file_checkpoint_train, tokenizer_file, tokenizer_type] + ) + + def setup_load_settings(): + output_components = [ + exp_name, + learning_rate, + batch_size_per_gpu, + batch_size_type, + max_samples, + grad_accumulation_steps, + max_grad_norm, + epochs, + num_warmup_updates, + save_per_updates, + keep_last_n_checkpoints, + last_per_updates, + ch_finetune, + file_checkpoint_train, + tokenizer_type, + tokenizer_file, + mixed_precision, + cd_logger, + ch_8bit_adam, + ] + return output_components + + outputs = setup_load_settings() + + cm_project.change( + fn=load_settings, + inputs=[cm_project], + outputs=outputs, + ) + + ch_refresh_project.click( + fn=load_settings, + inputs=[cm_project], + outputs=outputs, + ) + + with gr.TabItem("Test Model"): + gr.Markdown("""```plaintext +SOS: Check the use_ema setting (True or False) for your model to see what works best for you. 
use seed -1 from random +```""") + exp_name = gr.Radio( + label="Model", choices=["F5TTS_v1_Base", "F5TTS_Base", "E2TTS_Base"], value="F5TTS_v1_Base" + ) + list_checkpoints, checkpoint_select = get_checkpoints_project(projects_selelect, False) + + with gr.Row(): + nfe_step = gr.Number(label="NFE Step", value=32) + speed = gr.Slider(label="Speed", value=1.0, minimum=0.3, maximum=2.0, step=0.1) + seed = gr.Number(label="Seed", value=-1, minimum=-1) + remove_silence = gr.Checkbox(label="Remove Silence") + + ch_use_ema = gr.Checkbox(label="Use EMA", value=True) + with gr.Row(): + cm_checkpoint = gr.Dropdown( + choices=list_checkpoints, value=checkpoint_select, label="Checkpoints", allow_custom_value=True + ) + bt_checkpoint_refresh = gr.Button("Refresh") + + random_sample_infer = gr.Button("Random Sample") + + ref_text = gr.Textbox(label="Ref Text") + ref_audio = gr.Audio(label="Audio Ref", type="filepath") + gen_text = gr.Textbox(label="Gen Text") + + random_sample_infer.click( + fn=get_random_sample_infer, inputs=[cm_project], outputs=[ref_text, gen_text, ref_audio] + ) + + with gr.Row(): + txt_info_gpu = gr.Textbox("", label="Device") + seed_info = gr.Text(label="Seed :") + check_button_infer = gr.Button("Infer") + + gen_audio = gr.Audio(label="Audio Gen", type="filepath") + + check_button_infer.click( + fn=infer, + inputs=[ + cm_project, + cm_checkpoint, + exp_name, + ref_text, + ref_audio, + gen_text, + nfe_step, + ch_use_ema, + speed, + seed, + remove_silence, + ], + outputs=[gen_audio, txt_info_gpu, seed_info], + ) + + bt_checkpoint_refresh.click(fn=get_checkpoints_project, inputs=[cm_project], outputs=[cm_checkpoint]) + cm_project.change(fn=get_checkpoints_project, inputs=[cm_project], outputs=[cm_checkpoint]) + + with gr.TabItem("Prune Checkpoint"): + gr.Markdown("""```plaintext +Reduce the Base model size from 5GB to 1.3GB. The new checkpoint file prunes out optimizer and etc., can be used for inference or finetuning afterward, but not able to resume pretraining. 
+```""") + txt_path_checkpoint = gr.Text(label="Path to Checkpoint:") + txt_path_checkpoint_small = gr.Text(label="Path to Output:") + ch_safetensors = gr.Checkbox(label="Safetensors", value="") + txt_info_reduse = gr.Text(label="Info", value="") + reduse_button = gr.Button("Reduce") + reduse_button.click( + fn=extract_and_save_ema_model, + inputs=[txt_path_checkpoint, txt_path_checkpoint_small, ch_safetensors], + outputs=[txt_info_reduse], + ) + + with gr.TabItem("System Info"): + output_box = gr.Textbox(label="GPU and CPU Information", lines=20) + + def update_stats(): + return get_combined_stats() + + update_button = gr.Button("Update Stats") + update_button.click(fn=update_stats, outputs=output_box) + + def auto_update(): + yield gr.update(value=update_stats()) + + gr.update(fn=auto_update, inputs=[], outputs=output_box) + + +@click.command() +@click.option("--port", "-p", default=None, type=int, help="Port to run the app on") +@click.option("--host", "-H", default=None, help="Host to run the app on") +@click.option( + "--share", + "-s", + default=False, + is_flag=True, + help="Share the app via Gradio share link", +) +@click.option("--api", "-a", default=True, is_flag=True, help="Allow API access") +def main(port, host, share, api): + global app + print("Starting app...") + app.queue(api_open=api).launch(server_name=host, server_port=port, share=share, show_api=api) + + +if __name__ == "__main__": + main() diff --git a/src/f5_tts/train/train.py b/src/f5_tts/train/train.py new file mode 100644 index 0000000000000000000000000000000000000000..2e191a3707a23a710bc7d98510ee7c3df50ea4ca --- /dev/null +++ b/src/f5_tts/train/train.py @@ -0,0 +1,76 @@ +# training script. + +import os +from importlib.resources import files + +import hydra +from omegaconf import OmegaConf + +from f5_tts.model import CFM, DiT, UNetT, Trainer # noqa: F401. 
used for config +from f5_tts.model.dataset import load_dataset +from f5_tts.model.utils import get_tokenizer + +os.chdir(str(files("f5_tts").joinpath("../.."))) # change working directory to root of project (local editable) + + +@hydra.main(version_base="1.3", config_path=str(files("f5_tts").joinpath("configs")), config_name=None) +def main(cfg): + model_cls = globals()[cfg.model.backbone] + model_arc = cfg.model.arch + tokenizer = cfg.model.tokenizer + mel_spec_type = cfg.model.mel_spec.mel_spec_type + + exp_name = f"{cfg.model.name}_{mel_spec_type}_{cfg.model.tokenizer}_{cfg.datasets.name}" + wandb_resume_id = None + + # set text tokenizer + if tokenizer != "custom": + tokenizer_path = cfg.datasets.name + else: + tokenizer_path = cfg.model.tokenizer_path + vocab_char_map, vocab_size = get_tokenizer(tokenizer_path, tokenizer) + + # set model + model = CFM( + transformer=model_cls(**model_arc, text_num_embeds=vocab_size, mel_dim=cfg.model.mel_spec.n_mel_channels), + mel_spec_kwargs=cfg.model.mel_spec, + vocab_char_map=vocab_char_map, + ) + + # init trainer + trainer = Trainer( + model, + epochs=cfg.optim.epochs, + learning_rate=cfg.optim.learning_rate, + num_warmup_updates=cfg.optim.num_warmup_updates, + save_per_updates=cfg.ckpts.save_per_updates, + keep_last_n_checkpoints=cfg.ckpts.keep_last_n_checkpoints, + checkpoint_path=str(files("f5_tts").joinpath(f"../../{cfg.ckpts.save_dir}")), + batch_size_per_gpu=cfg.datasets.batch_size_per_gpu, + batch_size_type=cfg.datasets.batch_size_type, + max_samples=cfg.datasets.max_samples, + grad_accumulation_steps=cfg.optim.grad_accumulation_steps, + max_grad_norm=cfg.optim.max_grad_norm, + logger=cfg.ckpts.logger, + wandb_project="CFM-TTS", + wandb_run_name=exp_name, + wandb_resume_id=wandb_resume_id, + last_per_updates=cfg.ckpts.last_per_updates, + log_samples=cfg.ckpts.log_samples, + bnb_optimizer=cfg.optim.bnb_optimizer, + mel_spec_type=mel_spec_type, + is_local_vocoder=cfg.model.vocoder.is_local, + local_vocoder_path=cfg.model.vocoder.local_path, + cfg_dict=OmegaConf.to_container(cfg, resolve=True), + ) + + train_dataset = load_dataset(cfg.datasets.name, tokenizer, mel_spec_kwargs=cfg.model.mel_spec) + trainer.train( + train_dataset, + num_workers=cfg.datasets.num_workers, + resumable_with_seed=666, # seed for shuffling dataset + ) + + +if __name__ == "__main__": + main() diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000000000000000000000000000000000000..9bc615340ab22c7d4da9c512a8ab000fa822ab04 --- /dev/null +++ b/templates/index.html @@ -0,0 +1,104 @@ + + + + + + F5-TTS Suy Luận Tiếng Việt + + + +
+<!-- [templates/index.html] The markup of this template was lost during extraction; only its visible text survived: -->
+<!--   heading: "F5-TTS Suy Luận Tiếng Việt" (F5-TTS Vietnamese Inference) -->
+<!--   instruction: "Chọn file âm thanh tham chiếu và nhập văn bản để tạo giọng nói." (Choose a reference audio file and enter the text to synthesize.) -->
+<!--   waveform placeholder: "Waveform sẽ hiển thị ở đây..." (The waveform will be displayed here...) -->
diff --git a/templates/index1.html b/templates/index1.html
new file mode 100644
index 0000000000000000000000000000000000000000..1aa583aa39dd41aabd85fbc45c309af91e344460
--- /dev/null
+++ b/templates/index1.html
@@ -0,0 +1,64 @@
+<!-- [templates/index1.html] The markup of this template was lost during extraction; only its visible text survived: -->
+<!--   heading: "F5-TTS Suy Luận Tiếng Việt" (F5-TTS Vietnamese Inference) -->
+<!--   instruction: "Chọn file âm thanh tham chiếu và nhập văn bản để tạo giọng nói." (Choose a reference audio file and enter the text to synthesize.) -->
\ No newline at end of file
diff --git a/test_f5_tts.py b/test_f5_tts.py
new file mode 100644
index 0000000000000000000000000000000000000000..5314bc12544f0abe6833640d3403c495436623e6
--- /dev/null
+++ b/test_f5_tts.py
@@ -0,0 +1,54 @@
+import subprocess
+import os
+import sys
+
+def run_f5_tts():
+    # Get the absolute path of the directory containing this file
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+
+    # Get the absolute path to infer_cli.py
+    infer_cli_path = os.path.join(current_dir, "src", "f5_tts", "infer", "infer_cli.py")
+
+    # Define the inference parameters
+    model = "F5TTS_Base"
+    ref_text = "bà nói cái chuyện gì tôi nhớ à, còn chuyện gì tôi hỏng nhớ."
+    ref_audio = "clon/ONG_GIA.mp3"
+    gen_text = "tình yêu là gì? mà nó có thể làm con người ta đau khổ đến như vậy?"
+
+    speed = 1.0
+    vocoder_name = "vocos"
+    vocab_file = os.path.join(current_dir, "F5-TTS-MRSU", "vocab.txt")
+    ckpt_file = os.path.join(current_dir, "F5-TTS-MRSU", "model_last.pt")
+
+    # ✅ Set the environment variable, equivalent to: $env:PYTHONIOENCODING="utf-8"
+    os.environ["PYTHONIOENCODING"] = "utf-8"
+
+    # Invoke Python to run infer_cli.py directly
+    command = [
+        sys.executable,  # use the same Python interpreter that runs this script
+        infer_cli_path,
+        "--model", model,
+        "--ref_audio", ref_audio,
+        "--ref_text", ref_text,
+        "--gen_text", gen_text,
+        "--speed", str(speed),
+        "--vocoder_name", vocoder_name,
+        "--vocab_file", vocab_file,
+        "--ckpt_file", ckpt_file
+    ]
+
+    try:
+        result = subprocess.run(
+            command,
+            check=True,
+            capture_output=True,
+            text=True
+        )
+        print("✅ Result:\n", result.stdout)
+    except subprocess.CalledProcessError as e:
+        print("❌ Error while running inference:\n", e.stderr)
+    except Exception as e:
+        print("❌ Unknown error:\n", str(e))
+
+if __name__ == "__main__":
+    run_f5_tts()