import os
import torch
import shutil
import librosa
import warnings
import numpy as np
import gradio as gr
import librosa.display
import matplotlib.pyplot as plt
from model import EvalNet
from utils import get_modelist, find_wav_files, embed_img
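# Maps each class label used by the classifier to the pinyin name of the
# corresponding Chinese playing technique shown in the demo output.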
TRANSLATE = {
    "vibrato": "Rou xian",
    "trill": "Chan yin",
    "tremolo": "Chan gong",
    "staccato": "Dun gong",
    "ricochet": "Pao gong",
    "pizzicato": "Bo xian",
    "percussive": "Ji gong",
    "legato_slide_glissando": "Lian hua yin",
    "harmonic": "Fan yin",
    "diangong": "Dian gong",
    "detache": "Fen gong",
}
CLASSES = list(TRANSLATE.keys())
TEMP_DIR = "./__pycache__/tmp"
SAMPLE_RATE = 44100
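# Every wav2* helper below renders a spectrogram image to TEMP_DIR/output.jpg;
# audio is resampled to SAMPLE_RATE and padded or trimmed to 3-second clips.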
def circular_padding(y: np.ndarray, sr: int, dur=3):
    """Trim or circularly repeat a clip so it is exactly `dur` seconds long."""
    if len(y) >= sr * dur:
        return y[: sr * dur]

    # Number of copies needed to reach at least sr * dur samples
    size = sr * dur // len(y) + int((sr * dur) % len(y) > 0)
    y = np.hstack([y for _ in range(size)])
    return y[: sr * dur]
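# Illustrative example (not part of the original script): a 1 s clip at
# 44.1 kHz is tiled into three copies and trimmed, so
# circular_padding(np.zeros(44100), 44100).shape == (132300,).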
def wav2mel(audio_path: str):
    """Render the log-Mel spectrogram of a clip and save it to TEMP_DIR/output.jpg."""
    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
    y = circular_padding(y, sr)
    mel_spec = librosa.feature.melspectrogram(y=y, sr=sr)
    log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
    librosa.display.specshow(log_mel_spec)
    plt.axis("off")
    plt.savefig(
        f"{TEMP_DIR}/output.jpg",
        bbox_inches="tight",
        pad_inches=0.0,
    )
    plt.close()
def wav2cqt(audio_path: str):
    """Render the log-power constant-Q transform of a clip and save it to TEMP_DIR/output.jpg."""
    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
    y = circular_padding(y, sr)
    cqt_spec = librosa.cqt(y=y, sr=sr)
    log_cqt_spec = librosa.power_to_db(np.abs(cqt_spec) ** 2, ref=np.max)
    librosa.display.specshow(log_cqt_spec)
    plt.axis("off")
    plt.savefig(
        f"{TEMP_DIR}/output.jpg",
        bbox_inches="tight",
        pad_inches=0.0,
    )
    plt.close()
def wav2chroma(audio_path: str):
    """Render the log-power chromagram of a clip and save it to TEMP_DIR/output.jpg."""
    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
    y = circular_padding(y, sr)
    chroma_spec = librosa.feature.chroma_stft(y=y, sr=sr)
    log_chroma_spec = librosa.power_to_db(np.abs(chroma_spec) ** 2, ref=np.max)
    librosa.display.specshow(log_chroma_spec)
    plt.axis("off")
    plt.savefig(
        f"{TEMP_DIR}/output.jpg",
        bbox_inches="tight",
        pad_inches=0.0,
    )
    plt.close()
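# Note: infer() below selects one of these renderers by name ("mel", "cqt" or
# "chroma"), so each helper must keep the wav2<spec> naming convention.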
def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
    # Start from a clean temp folder so only the current spectrogram is present
    if os.path.exists(folder_path):
        shutil.rmtree(folder_path)

    if not wav_path:
        return None, "Please input an audio!"

    # The spectrogram type (mel / cqt / chroma) is parsed from the model log name
    spec = log_name.split("_")[-3]
    os.makedirs(folder_path, exist_ok=True)
    try:
        model = EvalNet(log_name, len(TRANSLATE)).model
        # Dispatch to the matching wav2<spec> renderer defined above
        globals()[f"wav2{spec}"](wav_path)
    except Exception as e:
        return None, f"{e}"

    x = embed_img(f"{folder_path}/output.jpg")
    output: torch.Tensor = model(x)
    pred_id = torch.max(output.data, 1)[1]
    return (
        os.path.basename(wav_path),
        f"{TRANSLATE[CLASSES[pred_id]]} ({CLASSES[pred_id].capitalize()})",
    )
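# Hypothetical local check (the path is a placeholder; `models` comes from
# get_modelist() as in the __main__ block below):
#   filename, label = infer("./some_clip.wav", models[0])
#   print(filename, label)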
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    models = get_modelist(assign_model="Swin_T_mel")
    # Pair every bundled demo recording with the default model for the examples table
    examples = []
    example_wavs = find_wav_files()
    for wav in example_wavs:
        examples.append([wav, models[0]])

    with gr.Blocks() as demo:
        gr.Interface(
            fn=infer,
            inputs=[
                gr.Audio(label="Upload a recording", type="filepath"),
                gr.Dropdown(choices=models, label="Select a model", value=models[0]),
            ],
            outputs=[
                gr.Textbox(label="Audio filename", show_copy_button=True),
                gr.Textbox(label="Playing technique recognition", show_copy_button=True),
            ],
            examples=examples,
            cache_examples=False,
            allow_flagging="never",
            title="It is recommended to keep the recording length around 3s.",
        )
        gr.Markdown(
            """
# Cite
```bibtex
@article{Zhou-2025,
    author  = {Monan Zhou and Shenyang Xu and Zhaorui Liu and Zhaowen Wang and Feng Yu and Wei Li and Baoqiang Han},
    title   = {CCMusic: An Open and Diverse Database for Chinese Music Information Retrieval Research},
    journal = {Transactions of the International Society for Music Information Retrieval},
    volume  = {8},
    number  = {1},
    pages   = {22--38},
    month   = {Mar},
    year    = {2025},
    url     = {https://doi.org/10.5334/tismir.194},
    doi     = {10.5334/tismir.194}
}
```"""
        )

    demo.launch()