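"""Cut separated vocal/BGM stem pairs into aligned segments with WebRTC VAD.

Worker processes pull (vocal_path, bgm_path) pairs from a queue, detect voice
activity, choose cut points, and write matching vocal/BGM segments plus
cut-point info under <out_dir>/vocal_cut, <out_dir>/bgm_cut and
<out_dir>/vad_info.
"""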
import argparse
import glob
import os
import threading
import traceback

import torch
import torch.multiprocessing as mp
import webrtcvad
from scipy.io.wavfile import write
from tqdm import tqdm

# Locks intended for the append-style write_to_file helper in inference()
# (currently unused elsewhere in this script).
vocal_file_lock = threading.Lock()
bgm_file_lock = threading.Lock()

from vad_tool import (
    read_wave_to_frames,
    read_wave_to_frames_withbgm,
    vad_generator,
    cut_points_generator,
    cut_points_storage_generator,
    wavs_generator,
)
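# vad_tool is a local helper module. As used below: read_wave_to_frames_withbgm
# returns (frames, wav, vocal_wav, bgm_wav); vad_generator yields per-frame
# voice activity; cut_points_generator turns that into segment boundaries; and
# wavs_generator yields (segment_audio, segment_filename) pairs.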

LOGGING_INTERVAL = 3  # seconds between progress-bar updates

# webrtcvad only accepts 8/16/32/48 kHz audio and 10/20/30 ms frames, so VAD
# runs at 48 kHz while the cut segments are saved at 44.1 kHz.
SAMPLE_RATE = 48000
SAVE_SAMPLE_RATE = 44100
FRAME_DURATION = 10  # ms

SAVE_SAMPLE_PER_FRAME = int(FRAME_DURATION * SAVE_SAMPLE_RATE / 1000)

MIN_ACTIVE_TIME_MS = 200   # minimum run of speech to count as active
SIL_HEAD_TAIL_MS = 500     # silence kept at segment head/tail
SIL_MID_MS = 3000          # a silent gap this long forces a split
CUT_MIN_MS = 3000          # discard segments shorter than this
CUT_MAX_MS = 30000         # force a cut before exceeding this

MIN_ACTIVE_FRAME = MIN_ACTIVE_TIME_MS // FRAME_DURATION
SIL_FRAME = SIL_HEAD_TAIL_MS // FRAME_DURATION
SIL_MID_FRAME = SIL_MID_MS // FRAME_DURATION
CUT_MIN_FRAME = CUT_MIN_MS // FRAME_DURATION
CUT_MAX_FRAME = CUT_MAX_MS // FRAME_DURATION
RANDOM_MIN_FRAME = True
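# Worked out with the values above: each 10 ms frame is 441 samples at
# 44.1 kHz; speech must last >= 20 frames (200 ms) to count as active;
# 50 frames (500 ms) of silence pad the segment head/tail; a 300-frame (3 s)
# silent gap forces a split; cuts are clamped to 300-3000 frames (3-30 s).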


def gpu_holder(rank, a):
    # Busy-loops a dummy convolution on one GPU, presumably to keep the
    # device occupied while the CPU-bound workers run; `a` is unused.
    device = f'cuda:{rank}'
    conv = torch.nn.Conv1d(1024, 1024, 9, padding=4)
    conv.to(device)
    while True:
        x = torch.rand((8, 1024, 128), device=device)
        conv(x)


def inference(rank, out_dir, filelist_name, queue: mp.Queue):
    # Worker: pulls (vocal_path, bgm_path) pairs off the queue until a None
    # sentinel arrives. `rank` and `filelist_name` are currently unused.
    vocal_out_dir = os.path.join(out_dir, "vocal_cut")
    bgm_out_dir = os.path.join(out_dir, "bgm_cut")
    info_dir = os.path.join(out_dir, "vad_info")
    os.makedirs(vocal_out_dir, exist_ok=True)
    os.makedirs(bgm_out_dir, exist_ok=True)
    os.makedirs(info_dir, exist_ok=True)

    def write_to_file(file_path, data, file_lock):  # currently unused helper
        with file_lock:
            with open(file_path, 'a') as f:
                f.write(data)
    while True:
        input_path = queue.get()
        if input_path is None:
            break
        try:
            vad_tools = webrtcvad.Vad(3) # create a new vad each time to avoid some bugs
            vocal_path, bgm_path = input_path[0]
            filename = os.path.basename(vocal_path).replace(".wav", "")
            #frames, wav = read_wave_to_frames(vocal_path, SAMPLE_RATE, FRAME_DURATION)
            frames, wav, vocal_wav, bgm_wav = read_wave_to_frames_withbgm(vocal_path, bgm_path, SAMPLE_RATE, SAVE_SAMPLE_RATE, FRAME_DURATION)
            vad_info = vad_generator(frames, SAMPLE_RATE, vad_tools)

            cut_points = cut_points_generator(vad_info, MIN_ACTIVE_FRAME, SIL_FRAME, SIL_MID_FRAME, CUT_MIN_FRAME, CUT_MAX_FRAME, RANDOM_MIN_FRAME)
            raw_vad_content, file_content = cut_points_storage_generator(vad_info, cut_points, FRAME_DURATION)

            with open(os.path.join(info_dir, filename+".raw_info.txt"), "w") as f:
                f.write(raw_vad_content)
            with open(os.path.join(info_dir, filename+".txt"), "w") as f:
                f.write(file_content)

            wavs = wavs_generator(vocal_wav, cut_points, filename, SAVE_SAMPLE_RATE, FRAME_DURATION)
            bgm_wavs = wavs_generator(bgm_wav, cut_points, filename, SAVE_SAMPLE_RATE, FRAME_DURATION)
            for ((wav_seg, name), (bgm_wav_seg, _)) in zip(wavs, bgm_wavs):
                if wav_seg.shape[-1] < SAVE_SAMPLE_RATE * CUT_MIN_MS / 1000:
                    continue  # drop segments shorter than CUT_MIN_MS
                write(os.path.join(vocal_out_dir, name), SAVE_SAMPLE_RATE, wav_seg)
                write(os.path.join(bgm_out_dir, name), SAVE_SAMPLE_RATE, bgm_wav_seg)

        except Exception:
            # Log the failure and keep the worker alive for the next file.
            traceback.print_exc()

def setInterval(interval):
    def decorator(function):
        def wrapper(*args, **kwargs):
            stopped = threading.Event()

            def loop():  # executed in another thread
                while not stopped.wait(interval):  # until stopped
                    function(*args, **kwargs)

            t = threading.Thread(target=loop)
            t.daemon = True  # stop if the program exits
            t.start()
            return stopped

        return wrapper

    return decorator
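
# Usage sketch for the decorator above (mirrors QueueWatcher below):
#     @setInterval(1.0)
#     def tick(): ...
#     stopper = tick()   # starts the periodic daemon thread
#     stopper.set()      # stops it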


last_batches = None
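
# QueueWatcher (below) polls queue.qsize() every LOGGING_INTERVAL seconds and
# advances the progress bar by how much the queue drained since the last poll.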


@setInterval(LOGGING_INTERVAL)
def QueueWatcher(queue, bar):
    global last_batches
    curr_batches = queue.qsize()
    bar.update(last_batches-curr_batches)
    last_batches = curr_batches


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--filelist_or_dir", type=str, required=True, help="Path to save checkpoints")
    parser.add_argument("--out_dir", type=str, required=True, help="Path to save checkpoints")
    parser.add_argument("--jobs", type=int, required=False, default=2, help="Path to save checkpoints")
    parser.add_argument("--log_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints")
    parser.add_argument("--model_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints")
    args = parser.parse_args()

    filelist_or_dir = args.filelist_or_dir
    out_dir = args.out_dir
    NUM_THREADS = args.jobs

    if os.path.isfile(filelist_or_dir):
        filelist_name = filelist_or_dir.split('/')[-1].split('.')[0]
        # The workers expect (vocal_path, bgm_path) pairs, so build them from
        # the out_dir vocal/ and bgm/ subdirectories.
        generator = [(os.path.join(out_dir, "vocal", os.path.basename(x)),
                      os.path.join(out_dir, "bgm", os.path.basename(x)))
                     for x in open(filelist_or_dir).read().splitlines()]
    else:
        filelist_name = "single"
        generator = [(os.path.join(os.path.dirname(os.path.dirname(x)), "vocal", os.path.basename(x)),
                      os.path.join(os.path.dirname(os.path.dirname(x)), "bgm", os.path.basename(x)))
                     for x in glob.glob(f"{filelist_or_dir}/*.wav")]
    
    # mp.set_start_method('spawn', force=True)  # consider enabling: fork + CUDA in children can misbehave

    print(f"Running with {NUM_THREADS} worker processes (batch size 1)")
    processes = []
    queue = mp.Queue()
    for rank in range(NUM_THREADS):
        p = mp.Process(target=inference, args=(rank, out_dir, filelist_name, queue), daemon=True)
        p.start()
        processes.append(p)

    # Hold each visible GPU with a daemon busy-loop process (never joined;
    # they exit with the main process). Skipped when no GPU is available.
    n_gpus = torch.cuda.device_count()
    if n_gpus > 0:
        for i in range(4):
            rank = i % n_gpus
            p = mp.Process(target=gpu_holder, args=(rank, 0), daemon=True)
            p.start()

    accum = []

    # Batch size is fixed at 1: each queue entry is a one-element list
    # holding a (vocal_path, bgm_path) tuple.
    for item in tqdm(generator):
        accum.append(item)
        if len(accum) == 1:
            queue.put(accum.copy())
            accum.clear()


    # One None sentinel per worker tells inference() to exit.
    for _ in range(NUM_THREADS):
        queue.put(None)

    last_batches = queue.qsize()
    bar = tqdm(total=last_batches)
    queue_watcher = QueueWatcher(queue, bar)
    for p in processes:
        p.join()
    queue_watcher.set()  # stop the progress-watcher thread
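
# Example invocation (hypothetical paths; the script name is assumed):
#   python cut_by_vad.py --filelist_or_dir /data/sep/htdemucs --out_dir /data/cut --jobs 8
# In directory mode, each <dir>/<name>.wav is paired with the sibling stems
# <dir>/../vocal/<name>.wav and <dir>/../bgm/<name>.wav.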