audio:
  chunk_size: 261632
  dim_f: 4096
  dim_t: 512
  hop_length: 512
  n_fft: 8192
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.001
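# Note on the audio block above (assumptions, not stated in this file):
# chunk_size appears to satisfy hop_length * (dim_t - 1) = 512 * 511 = 261632
# samples (~5.93 s at 44100 Hz), i.e. exactly dim_t STFT frames per chunk.
# dim_f = n_fft / 2 = 4096 presumably crops the top bin of the 4097-bin
# one-sided spectrum, and min_mean_abs likely skips near-silent training
# chunks whose mean absolute amplitude falls below the threshold.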
model:
  encoder_name: tu-maxvit_large_tf_512 # see the list of supported encoders: https://github.com/qubvel/segmentation_models.pytorch#encoders-
  decoder_type: unet # unet, fpn
  act: gelu
  num_channels: 128
  num_subbands: 8
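# Note on the model block above (assumptions): the "tu-" prefix selects a
# timm encoder through segmentation_models.pytorch. num_channels here is
# presumably the network's internal feature width, distinct from the stereo
# I/O width set by audio.num_channels. With num_subbands: 8, each subband
# likely spans dim_f / 8 = 512 frequency bins, folded into the channel axis
# so the 2D encoder sees a squarer input.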
loss_multistft:
  fft_sizes:
  - 1024
  - 2048
  - 4096
  hop_sizes:
  - 512
  - 1024
  - 2048
  win_lengths:
  - 1024
  - 2048
  - 4096
  window: "hann_window"
  scale: "mel"
  n_bins: 128
  sample_rate: 44100
  perceptual_weighting: true
  w_sc: 1.0
  w_log_mag: 1.0
  w_lin_mag: 0.0
  w_phs: 0.0
  mag_distance: "L1"
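# Note on the loss block above (assumption): these fields appear to mirror
# auraloss.freq.MultiResolutionSTFTLoss. With w_sc = w_log_mag = 1.0 and
# w_lin_mag = w_phs = 0.0, the loss sums, over the three STFT resolutions,
# a spectral-convergence term plus an L1 log-magnitude term
# (mag_distance: "L1"), computed on 128-bin mel spectrograms
# (scale: "mel", n_bins: 128) with perceptual (A-weighted) pre-filtering.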
training:
  batch_size: 8
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
  - vocals
  - other
  lr: 5.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: null
  num_epochs: 1000
  num_steps: 2000
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  optimizer: adamw
  other_fix: true # needed when validating on the multisong dataset to check whether "other" is actually the instrumental stem
  use_amp: true # enable mixed-precision training (float16); usually this should stay true
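# Note on the training block above (assumptions): grad_clip: 0 presumably
# disables gradient-norm clipping; num_steps is likely the number of batches
# per epoch; patience and reduce_factor suggest a plateau-style schedule that
# multiplies lr by 0.95 after 2 epochs without validation improvement; q with
# coarse_loss_clip likely clips outlier batch losses above the 0.95 quantile;
# ema_momentum presumably tracks an exponential moving average of the weights.
# target_instrument: null presumably keeps both listed stems as model outputs.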
augmentations:
  enable: true # master switch to quickly enable or disable all augmentations
  loudness: true # randomly change the loudness of each stem within the range [loudness_min, loudness_max]
  loudness_min: 0.5
  loudness_max: 1.5
  mixup: true # mix several stems of the same type with some probability (only works for dataset types 1, 2, and 3)
  mixup_probs: !!python/tuple # up to 2 additional stems of the same type (1st added with prob 0.2, 2nd with prob 0.02)
  - 0.2
  - 0.02
  mixup_loudness_min: 0.5
  mixup_loudness_max: 1.5
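# Note on mixup_probs above (assumption): if the two entries are independent
# probabilities of adding a 1st and a 2nd extra stem, a training sample gains
# on average 0.2 + 0.02 = 0.22 additional stems of each type per draw.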
inference:
  batch_size: 1
  dim_t: 512
  num_overlap: 4
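# Note on the inference block above (assumption): num_overlap: 4 likely means
# chunks advance by chunk_size / 4 = 261632 / 4 = 65408 samples, so each
# sample is covered ~4 times and overlapping predictions are averaged.
# dim_t matches the training value, so the chunk geometry is unchanged.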