|
# --- Environment bootstrap (runs at import time) ---
# NOTE(review): the pip installs below must run BEFORE the corresponding
# imports, so the ORDER of these top-level statements is load-bearing.
# Do not reorder.
import os

# Pin the gradio version this demo was written against (old Inputs/Outputs API).
os.system('pip install gradio==3.34.0')

# Dump the resolved environment to the build log for debugging.
os.system('pip freeze')

import sys

# Make repo-local packages (e.g. `bytesep`, used by the inference commands
# below) importable when the script is launched from the repo root.
sys.path.append('.')

import gradio as gr

os.system('pip install -U torchtext==0.8.0')



from scipy.io import wavfile




# Input audio is trimmed to this many seconds to keep CPU inference fast
# for each user (see `inference` and the page description).
split_at_timestamp = 6



# Fetch the pretrained separation checkpoints referenced by the
# bytesep/inference.py commands in `inference`.
os.system('./separate_scripts/download_checkpoints.sh')
|
|
|
def inference(audio, split_seconds=6):
    """Separate the leading seconds of an uploaded clip into vocals and accompaniment.

    Args:
        audio: Uploaded file object exposing a ``.name`` filesystem path to a
            ``.wav`` file (the shape gradio's ``type="file"`` Audio input provides).
        split_seconds: Seconds of audio to keep before separation. Defaults to 6
            to keep CPU inference time reasonable per user.

    Returns:
        Tuple of output paths ``('sep_vocals.mp3', 'sep_accompaniment.mp3')``.
        The separator commands are best-effort: if they fail, os.system does not
        raise and the paths may point at stale or missing files.
    """
    rate, data = wavfile.read(audio.name)

    # Number of sample frames corresponding to split_seconds of audio.
    split_at_frame = rate * split_seconds

    # Keep only the leading segment. The original sliced to
    # `split_at_frame - 1`, silently dropping the last frame of the window;
    # `[:split_at_frame]` keeps exactly split_seconds worth of samples.
    # (The tail of the clip was computed before but never used — removed.)
    left_data = data[:split_at_frame]

    # Write the trimmed clip for the separator subprocesses to consume.
    wavfile.write('foo_left.wav', rate, left_data)

    # Run the two pretrained ResUNet separators on the trimmed clip.
    os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/vocals-accompaniment,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_vocals.mp3""")

    os.system("""python bytesep/inference.py --config_yaml=./scripts/4_train/musdb18/configs/accompaniment-vocals,resunet_subbandtime.yaml --checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_accompaniment_16.4dB_350k_steps.pth --audio_path=foo_left.wav --output_path=sep_accompaniment.mp3""")

    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'
|
# Page copy shown on the demo.
title = "Music Source Separation"
description = "Gradio demo for Music Source Separation. To use it, simply add your audio, or click one of the examples to load them. Input Audio is trimmed to 6 seconds to keep inference time fast for each user on cpu. Currently supports .wav files. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"

examples = [['example.wav']]

# Wire the UI: one file-typed audio input, two file-typed audio outputs.
audio_input = gr.inputs.Audio(type="file", label="Input")
vocals_output = gr.outputs.Audio(type="file", label="Vocals")
accompaniment_output = gr.outputs.Audio(type="file", label="Accompaniment")

demo = gr.Interface(
    fn=inference,
    inputs=audio_input,
    outputs=[vocals_output, accompaniment_output],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
)
demo.launch(debug=True)