mattricesound committed
Commit 01d6a17 · 1 Parent(s): 1e53a02

Re-add change to config output processed files in separate dir

Files changed (2):
  1. cfg/config.yaml    +4 -0
  2. remfx/datasets.py  +3 -1
cfg/config.yaml CHANGED
@@ -9,6 +9,7 @@ sample_rate: 48000
 logs_dir: "./logs"
 log_every_n_steps: 1000
 render_files: True
+rendered_root: ${oc.env:DATASET_ROOT}
 
 callbacks:
   model_checkpoint:
@@ -31,6 +32,7 @@ datamodule:
     mode: "train"
     effect_types: ${effects.train_effects}
     render_files: ${render_files}
+    render_root: ${rendered_root}
   val_dataset:
     _target_: remfx.datasets.VocalSet
     sample_rate: ${sample_rate}
@@ -39,6 +41,7 @@ datamodule:
     mode: "val"
     effect_types: ${effects.val_effects}
     render_files: ${render_files}
+    render_root: ${rendered_root}
   test_dataset:
     _target_: remfx.datasets.VocalSet
     sample_rate: ${sample_rate}
@@ -47,6 +50,7 @@ datamodule:
     mode: "test"
     effect_types: ${effects.val_effects}
     render_files: ${render_files}
+    render_root: ${rendered_root}
 
   batch_size: 16
   num_workers: 8
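
The new top-level key uses OmegaConf's oc.env resolver, so the output location comes from the DATASET_ROOT environment variable rather than being hard-coded, and each dataset picks it up via the ${rendered_root} interpolation (note the per-dataset key is spelled render_root). A minimal sketch of how these keys resolve, assuming the config is loaded through OmegaConf/Hydra as the ${...} syntax implies; the /data/remfx value is hypothetical:

import os
from omegaconf import OmegaConf

os.environ.setdefault("DATASET_ROOT", "/data/remfx")  # hypothetical value

# Mirrors the committed keys: one env-driven root, interpolated per dataset.
cfg = OmegaConf.create(
    {
        "render_files": True,
        "rendered_root": "${oc.env:DATASET_ROOT}",
        "datamodule": {
            "train_dataset": {
                "render_files": "${render_files}",
                "render_root": "${rendered_root}",
            }
        },
    }
)

# oc.env reads the variable when the value is accessed; ${rendered_root}
# then propagates it into every dataset node that references it.
print(cfg.datamodule.train_dataset.render_root)  # -> /data/remfx
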
remfx/datasets.py CHANGED
@@ -20,12 +20,14 @@ class VocalSet(Dataset):
         chunk_size_in_sec: int = 3,
         effect_types: List[torch.nn.Module] = None,
         render_files: bool = True,
+        rendered_root: str = None,
         mode: str = "train",
     ):
         super().__init__()
         self.chunks = []
         self.song_idx = []
         self.root = Path(root)
+        self.rendered_root = Path(rendered_root)
         self.chunk_size_in_sec = chunk_size_in_sec
         self.sample_rate = sample_rate
         self.mode = mode
@@ -35,7 +37,7 @@ class VocalSet(Dataset):
         self.normalize = effects.LoudnessNormalize(sample_rate, target_lufs_db=-20)
         self.effect_types = effect_types
 
-        self.processed_root = self.root / "processed" / self.mode
+        self.processed_root = self.rendered_root / "processed" / self.mode
 
         self.num_chunks = 0
         print("Total files:", len(self.files))