audio:
  chunk_size: 352800
  dim_f: 1024
  dim_t: 256
  hop_length: 441
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000
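# A few sanity checks on the values above (added notes, not part of the original
# config): chunk_size 352800 = 8 s * 44100 Hz, hop_length 441 = 10 ms at 44.1 kHz,
# and n_fft 2048 yields 1025 STFT bins (dim_f: 1024 presumably crops the topmost
# bin). dim_t: 256 does not follow from chunk_size / hop_length (= 800) and looks
# like a legacy field kept for compatibility with other model types.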
model:
  dim: 384
  depth: 6
  stereo: true
  num_stems: 4
  time_transformer_depth: 1
  freq_transformer_depth: 1
  linear_transformer_depth: 0
  num_bands: 60
  dim_head: 64
  heads: 8
  attn_dropout: 0
  ff_dropout: 0
  flash_attn: true
  dim_freqs_in: 1025
  sample_rate: 44100  # needed for the mel filter bank from librosa
  stft_n_fft: 2048
  stft_hop_length: 441
  stft_win_length: 2048
  stft_normalized: false
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
    - 4096
    - 2048
    - 1024
    - 512
    - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: false
  mlp_expansion_factor: 4  # probably too large (the weights require a lot of memory)
  use_torch_checkpoint: false  # use gradient checkpointing to greatly reduce GPU memory consumption during training (not fully tested)
  skip_connection: false  # enable skip connections between transformer blocks; can help with gradient flow and may speed up training
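# Hedged reading of the settings above: dim_freqs_in = stft_n_fft // 2 + 1 = 1025
# is the STFT bin count, and num_bands: 60 together with the librosa mel filter
# bank suggests a Mel-Band RoFormer-style band split. The multi_stft_* keys likely
# configure a multi-resolution STFT loss: an L1 spectral loss computed at each
# listed window size (hop 147), summed and scaled by
# multi_stft_resolution_loss_weight on top of the time-domain loss.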
training:
  batch_size: 1
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
    - drums
    - bass
    - other
    - vocals
  lr: 1.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: null
  num_epochs: 1000
  num_steps: 1000
  augmentation: false  # enable augmentations via the audiomentations and pedalboard libraries
  augmentation_type: null
  use_mp3_compress: false  # deprecated
  augmentation_mix: false  # mix several stems of the same type with some probability
  augmentation_loudness: false  # randomly change the loudness of each stem
  augmentation_loudness_type: 1  # type 1 or 2
  augmentation_loudness_min: 0
  augmentation_loudness_max: 0
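# Added note: the augmentation_* keys above appear to be legacy options superseded
# by the dedicated "augmentations:" section below, which is presumably why they
# are all disabled here.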
  q: 0.95
  coarse_loss_clip: false
  ema_momentum: 0.999
  optimizer: adam
  other_fix: false  # needed when checking on the multisong dataset whether "other" is actually instrumental
  use_amp: true  # enable or disable mixed precision (float16); usually should be true
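# Hedged notes on this block: grad_clip: 0 most likely disables gradient clipping;
# patience and reduce_factor suggest ReduceLROnPlateau-style scheduling (lr is
# multiplied by 0.95 after 2 epochs without improvement); num_steps is likely the
# number of steps per epoch; q is probably the quantile used when coarse_loss_clip
# is enabled.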
augmentations:
  enable: true  # enable or disable all augmentations (to quickly turn them off if needed)
  loudness: true  # randomly change the loudness of each stem within the range [loudness_min, loudness_max]
  loudness_min: 0.5
  loudness_max: 1.5
  mixup: true  # mix several stems of the same type with some probability (only works for dataset types 1, 2, 3)
  mixup_probs: !!python/tuple  # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
    - 0.2
    - 0.02
  mixup_loudness_min: 0.5
  mixup_loudness_max: 1.5
  all:
    channel_shuffle: 0.5  # set to 0 or lower to disable
    random_inverse: 0.1  # reverse the track in time (a lower probability works better)
    random_polarity: 0.5  # polarity change (multiply the waveform by -1)
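# Assumption: the values under "all:" are per-example application probabilities,
# e.g. channel_shuffle fires on ~50% of training chunks and random_inverse on
# ~10%.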
inference:
  batch_size: 4
  dim_t: 256
  num_overlap: 2
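# Hedged note: num_overlap sets how many overlapping windows cover each sample at
# inference; with num_overlap: 2 chunks advance by roughly chunk_size / 2 and the
# overlapping predictions are averaged. Higher values (e.g. 4) are slower but can
# reduce chunk-boundary artifacts.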