---
# remfx Hydra training configuration (composed with the defaults list below).
# Hydra defaults list: _self_ first so the keys in this file take precedence
# over the composed config groups below.
defaults:
- _self_
- model: null # no model by default; select one on the CLI (e.g. model=...) — TODO confirm group name
- effects: all
# Global experiment settings, interpolated into the sections below.
seed: 12345
train: true
sample_rate: 48000
chunk_size: 262144 # ~5.5 s of audio at 48 kHz
logs_dir: "./logs"
render_files: true
render_root: "./data"
accelerator: null # forwarded to trainer.accelerator; set e.g. "gpu" to pin hardware
log_audio: true
# Effects: how many effects are applied/removed per example and which ones.
num_kept_effects: [2, 2] # [min, max]
num_removed_effects: [2, 2] # [min, max]
shuffle_kept_effects: true
shuffle_removed_effects: false
num_classes: 5
effects_to_keep:
  - reverb
  - chorus
  - delay
effects_to_remove:
  - compressor
  - distortion
# PyTorch Lightning callbacks, instantiated by Hydra via _target_.
callbacks:
  model_checkpoint:
    _target_: pytorch_lightning.callbacks.ModelCheckpoint
    monitor: "valid_loss" # name of the logged metric which determines when model is improving
    save_top_k: 1 # save k best models (determined by above metric)
    save_last: true # additionally always save model from last epoch
    mode: "min" # "min" since lower valid_loss is better; can be "max" or "min"
    verbose: false
    dirpath: ${logs_dir}/ckpts/${now:%Y-%m-%d-%H-%M-%S}
    filename: '{epoch:02d}-{valid_loss:.3f}'
  learning_rate_monitor:
    _target_: pytorch_lightning.callbacks.LearningRateMonitor
    logging_interval: "step"
  audio_logging:
    _target_: remfx.callbacks.AudioCallback
    sample_rate: ${sample_rate}
    log_audio: ${log_audio}
  metric_logging:
    _target_: remfx.callbacks.MetricCallback
# Data module: three EffectDataset splits sharing the global effect settings,
# differing only in total_chunks and mode.
datamodule:
  _target_: remfx.datasets.EffectDatamodule
  train_dataset:
    _target_: remfx.datasets.EffectDataset
    total_chunks: 8000
    sample_rate: ${sample_rate}
    root: ${oc.env:DATASET_ROOT}
    chunk_size: ${chunk_size}
    mode: "train"
    effect_modules: ${effects}
    effects_to_keep: ${effects_to_keep}
    effects_to_remove: ${effects_to_remove}
    num_kept_effects: ${num_kept_effects}
    num_removed_effects: ${num_removed_effects}
    shuffle_kept_effects: ${shuffle_kept_effects}
    shuffle_removed_effects: ${shuffle_removed_effects}
    render_files: ${render_files}
    render_root: ${render_root}
  val_dataset:
    _target_: remfx.datasets.EffectDataset
    total_chunks: 1000
    sample_rate: ${sample_rate}
    root: ${oc.env:DATASET_ROOT}
    chunk_size: ${chunk_size}
    mode: "val"
    effect_modules: ${effects}
    effects_to_keep: ${effects_to_keep}
    effects_to_remove: ${effects_to_remove}
    num_kept_effects: ${num_kept_effects}
    num_removed_effects: ${num_removed_effects}
    shuffle_kept_effects: ${shuffle_kept_effects}
    shuffle_removed_effects: ${shuffle_removed_effects}
    render_files: ${render_files}
    render_root: ${render_root}
  test_dataset:
    _target_: remfx.datasets.EffectDataset
    total_chunks: 1000
    sample_rate: ${sample_rate}
    root: ${oc.env:DATASET_ROOT}
    chunk_size: ${chunk_size}
    mode: "test"
    effect_modules: ${effects}
    effects_to_keep: ${effects_to_keep}
    effects_to_remove: ${effects_to_remove}
    num_kept_effects: ${num_kept_effects}
    num_removed_effects: ${num_removed_effects}
    shuffle_kept_effects: ${shuffle_kept_effects}
    shuffle_removed_effects: ${shuffle_removed_effects}
    render_files: ${render_files}
    render_root: ${render_root}
  # DataLoader settings shared by all splits.
  batch_size: 16
  num_workers: 8
  pin_memory: true
  persistent_workers: true
# Weights & Biases logger; project/entity come from environment variables.
logger:
  _target_: pytorch_lightning.loggers.WandbLogger
  project: ${oc.env:WANDB_PROJECT}
  entity: ${oc.env:WANDB_ENTITY}
  # offline: false # set to true to store all logs only locally
  job_type: "train"
  group: ""
  save_dir: "."
# PyTorch Lightning Trainer; runs until max_steps (max_epochs: -1 = no epoch cap).
trainer:
  _target_: pytorch_lightning.Trainer
  precision: 32 # Precision used for tensors, default `32`
  min_epochs: 0
  max_epochs: -1
  enable_model_summary: false
  log_every_n_steps: 1 # Logs metrics every N batches
  accumulate_grad_batches: 1
  accelerator: ${accelerator}
  devices: 1
  gradient_clip_val: 10.0
  max_steps: 50000