RapBank / data_pipeline / seperation / configs / config_vocals_scnet.yaml
audio:
  chunk_size: 264600
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000
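  # chunk_size / sample_rate = 264600 / 44100 = 6.0 s of audio per training chunk (assuming chunk_size is in samples)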

model:
  sources: ['vocals', 'other']
  audio_channels: 2
  # dims: [4, 32, 64, 128] # small version
  dims: [4, 64, 128, 256]
  nfft: 4096
  hop_size: 1024
  win_size: 4096
  normalized: True
  band_configs: {
    'low': { 'SR': .175, 'stride': 1, 'kernel': 3 },
    'mid': { 'SR': .392, 'stride': 4, 'kernel': 4 },
    'high': { 'SR': .433, 'stride': 16, 'kernel': 16 }
  }
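  # The three 'SR' values sum to 1.0 (0.175 + 0.392 + 0.433); presumably they are split
  # ratios dividing the frequency axis (nfft/2 + 1 = 2049 bins at nfft 4096) into
  # low/mid/high subbands, each processed with its own stride and kernel.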
  conv_depths: [3, 2, 1]
  compress: 4
  conv_kernel: 3
  # Dual-path RNN
  num_dplayer: 6
  expand: 1
  # mamba
  use_mamba: False
  mamba_config: {
    'd_stat': 16,
    'd_conv': 4,
    'd_expand': 2
  }

training:
  batch_size: 4
  gradient_accumulation_steps: 2
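  # effective batch size per optimizer step = batch_size * gradient_accumulation_steps = 4 * 2 = 8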
  grad_clip: 0
  instruments:
    - vocals
    - other
  lr: 5.0e-04
  patience: 2
  reduce_factor: 0.95
  target_instrument: null
  num_epochs: 1000
  num_steps: 1000
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  optimizer: adam
  other_fix: true # needed when validating on a multisong dataset, to check whether 'other' is actually instrumental
  use_amp: true # enable or disable mixed precision (float16); usually this should be true

augmentations:
  enable: true # enable or disable all augmentations (lets you quickly turn them all off if needed)
  loudness: true # randomly change the loudness of each stem within the range (loudness_min; loudness_max)
  loudness_min: 0.5
  loudness_max: 1.5
  mixup: true # mix several stems of the same type with some probability (only works for dataset types 1, 2, 3)
  mixup_probs:
    !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
    - 0.2
    - 0.02
  mixup_loudness_min: 0.5
  mixup_loudness_max: 1.5

inference:
  batch_size: 8
  dim_t: 256
  num_overlap: 4
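
A minimal sketch of loading this config in Python, assuming PyYAML. Because mixup_probs
uses the !!python/tuple tag, yaml.safe_load will reject the file; yaml.unsafe_load (or a
loader registered for Python tags) is needed, and should only be used on trusted files.
The file path and field accesses below are for illustration; the actual training
pipeline may consume the config differently.

import yaml

with open("config_vocals_scnet.yaml") as f:
    cfg = yaml.unsafe_load(f)  # plain nested dicts; keys mirror the sections above

sr = cfg["audio"]["sample_rate"]           # 44100
chunk_s = cfg["audio"]["chunk_size"] / sr  # 6.0 s per training chunk
print(f"chunks: {chunk_s:.1f} s at {sr} Hz")
print("sources:", cfg["model"]["sources"])                  # ['vocals', 'other']
print("mixup probs:", cfg["augmentations"]["mixup_probs"])  # (0.2, 0.02)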