Merge branch 'main' of https://github.com/descriptinc/lyrebird-vampnet into main
Files changed:
- requirements.txt        +1    -0
- vampnet/beats.py        +249  -0
- vampnet/interface.py    +99   -7
- vampnet/modules/base.py +6    -5
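
Before the per-file diffs, a rough sketch of the workflow this commit enables: a WaveBeat tracker supplies beat times, the interface turns them into a beat-synced token mask, and variation() threads that mask into coarse vamping. The diff does not show how the tracker is attached to the Interface (make_beat_mask only asserts hasattr(self, "beat_tracker")), so the assignment below and the input file are assumptions, not the committed API.

from audiotools import AudioSignal
from vampnet.beats import WaveBeat

# assumed: `interface` is an already-constructed vampnet.interface.Interface
interface.beat_tracker = WaveBeat(ckpt_path="checkpoints/wavebeat", device="cpu")

signal = AudioSignal("input.wav")  # placeholder input file
out = interface.variation(
    signal,
    beat_mask=True,  # build an ext_mask from tracked beats
    beat_mask_kwargs=dict(before_beat_s=0.1, after_beat_s=0.1, invert=True),
    verbose=True,
)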
requirements.txt (CHANGED)

@@ -3,6 +3,7 @@ pytorch-ignite
 rich
 audiotools @ git+https://github.com/descriptinc/lyrebird-audiotools.git@hf/backup-info
 lac @ git+https://github.com/descriptinc/lyrebird-audio-codec.git@hf/vampnet-temp
+wavebeat @ git+https://github.com/hugofloresgarcia/wavebeat.git
 tqdm
 tensorboard
 google-cloud-logging==2.2.0
vampnet/beats.py (ADDED)

@@ -0,0 +1,249 @@
import json
import logging
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Any
from typing import List
from typing import Tuple
from typing import Union

import librosa
import numpy as np
from audiotools import AudioSignal


logging.basicConfig(level=logging.INFO)

###################
# beat sync utils #
###################

AGGREGATOR_REGISTRY = {
    "mean": np.mean,
    "median": np.median,
    "max": np.max,
    "min": np.min,
}


def list_aggregators() -> list:
    return list(AGGREGATOR_REGISTRY.keys())


@dataclass
class TimeSegment:
    start: float
    end: float

    @property
    def duration(self):
        return self.end - self.start

    def __str__(self) -> str:
        return f"{self.start} - {self.end}"

    def find_overlapping_segment(
        self, segments: List["TimeSegment"]
    ) -> Union["TimeSegment", None]:
        """Find the first segment that overlaps with this segment, or None if no segment overlaps"""
        for s in segments:
            if s.start <= self.start and s.end >= self.end:
                return s
        return None


def mkdir(path: Union[Path, str]) -> Path:
    p = Path(path)
    p.mkdir(parents=True, exist_ok=True)
    return p


###################
#    beat data    #
###################
@dataclass
class BeatSegment(TimeSegment):
    downbeat: bool = False  # if there's a downbeat on the start_time


class Beats:
    def __init__(self, beat_times, downbeat_times):
        if isinstance(beat_times, np.ndarray):
            beat_times = beat_times.tolist()
        if isinstance(downbeat_times, np.ndarray):
            downbeat_times = downbeat_times.tolist()
        self._beat_times = beat_times
        self._downbeat_times = downbeat_times
        self._use_downbeats = False

    def use_downbeats(self, use_downbeats: bool = True):
        """use downbeats instead of beats when calling beat_times"""
        self._use_downbeats = use_downbeats

    def beat_segments(self, signal: AudioSignal) -> List[BeatSegment]:
        """
        segments a song into time segments corresponding to beats.
        the first segment starts at 0 and ends at the first beat time.
        the last segment starts at the last beat time and ends at the end of the song.
        """
        beat_times = self._beat_times.copy()
        downbeat_times = self._downbeat_times
        beat_times.insert(0, 0)
        beat_times.append(signal.signal_duration)

        downbeat_ids = np.intersect1d(beat_times, downbeat_times, return_indices=True)[
            1
        ]
        is_downbeat = [
            True if i in downbeat_ids else False for i in range(len(beat_times))
        ]
        segments = [
            BeatSegment(start_time, end_time, downbeat)
            for start_time, end_time, downbeat in zip(
                beat_times[:-1], beat_times[1:], is_downbeat
            )
        ]
        return segments

    def get_beats(self) -> np.ndarray:
        """returns an array of beat times, in seconds.
        if use_downbeats is set, returns an array of downbeat times, in seconds.
        """
        return np.array(
            self._downbeat_times if self._use_downbeats else self._beat_times
        )

    @property
    def beat_times(self) -> np.ndarray:
        """return beat times"""
        return np.array(self._beat_times)

    @property
    def downbeat_times(self) -> np.ndarray:
        """return downbeat times"""
        return np.array(self._downbeat_times)

    def beat_times_to_feature_frames(
        self, signal: AudioSignal, features: np.ndarray
    ) -> np.ndarray:
        """convert beat times to frames, given an array of time-varying features"""
        beat_times = self.get_beats()
        beat_frames = (
            beat_times * signal.sample_rate / signal.signal_length * features.shape[-1]
        ).astype(np.int64)
        return beat_frames

    def sync_features(
        self, feature_frames: np.ndarray, features: np.ndarray, aggregate="median"
    ) -> np.ndarray:
        """sync features to beats"""
        if aggregate not in AGGREGATOR_REGISTRY:
            raise ValueError(f"unknown aggregation method {aggregate}")

        return librosa.util.sync(
            features, feature_frames, aggregate=AGGREGATOR_REGISTRY[aggregate]
        )

    def to_json(self) -> dict:
        """return beats and downbeats as a json-serializable dict"""
        return {
            "beats": self._beat_times,
            "downbeats": self._downbeat_times,
            "use_downbeats": self._use_downbeats,
        }

    @classmethod
    def from_dict(cls, data: dict):
        """load beats and downbeats from a dict"""
        inst = cls(data["beats"], data["downbeats"])
        inst.use_downbeats(data["use_downbeats"])
        return inst

    def save(self, output_dir: Path):
        """save beats and downbeats to json"""
        mkdir(output_dir)
        with open(output_dir / "beats.json", "w") as f:
            json.dump(self.to_json(), f)

    @classmethod
    def load(cls, input_dir: Path):
        """load beats and downbeats from json"""
        beats_file = Path(input_dir) / "beats.json"
        with open(beats_file, "r") as f:
            data = json.load(f)
        return cls.from_dict(data)


###################
#  beat tracking  #
###################


class BeatTracker:
    def extract_beats(self, signal: AudioSignal) -> Tuple[np.ndarray, np.ndarray]:
        """extract beats from an audio signal"""
        raise NotImplementedError

    def __call__(self, signal: AudioSignal) -> Beats:
        """extract beats from an audio signal.
        NOTE: if the first beat (and/or downbeat) is detected within the first 100ms of the audio,
        it is discarded. This is to avoid empty bins with no beat-synced features in the first beat.
        Args:
            signal (AudioSignal): signal to beat track
        Returns:
            Beats: beat and downbeat times wrapped in a Beats object
        """
        beats, downbeats = self.extract_beats(signal)
        return Beats(beats, downbeats)


class WaveBeat(BeatTracker):
    def __init__(self, ckpt_path: str = "checkpoints/wavebeat", device: str = "cpu"):
        from wavebeat.dstcn import dsTCNModel

        model = dsTCNModel.load_from_checkpoint(ckpt_path)
        model.eval()

        self.device = device
        self.model = model

    def extract_beats(self, signal: AudioSignal) -> Tuple[np.ndarray, np.ndarray]:
        """returns beat and downbeat times, in seconds"""
        # extract beats
        beats, downbeats = self.model.predict_beats_from_array(
            audio=signal.audio_data.squeeze(0),
            sr=signal.sample_rate,
            use_gpu=self.device != "cpu",
        )

        return beats, downbeats


class MadmomBeats(BeatTracker):
    def __init__(self):
        raise NotImplementedError

    def extract_beats(self, signal: AudioSignal) -> Tuple[np.ndarray, np.ndarray]:
        """returns beat and downbeat times, in seconds"""
        pass


BEAT_TRACKER_REGISTRY = {
    "wavebeat": WaveBeat,
    "madmom": MadmomBeats,
}


def list_beat_trackers() -> list:
    return list(BEAT_TRACKER_REGISTRY.keys())


def load_beat_tracker(beat_tracker: str, **kwargs) -> BeatTracker:
    if beat_tracker not in BEAT_TRACKER_REGISTRY:
        raise ValueError(
            f"Unknown beat tracker {beat_tracker}. Available: {list_beat_trackers()}"
        )

    return BEAT_TRACKER_REGISTRY[beat_tracker](**kwargs)
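
The new module is usable on its own, outside the interface. A minimal usage sketch, assuming a local wavebeat checkpoint at the default path and a placeholder input.wav:

from pathlib import Path

from audiotools import AudioSignal
from vampnet.beats import load_beat_tracker

signal = AudioSignal("input.wav")  # placeholder input file

# registry route; WaveBeat(ckpt_path=..., device=...) also works directly
tracker = load_beat_tracker("wavebeat", ckpt_path="checkpoints/wavebeat")

beats = tracker(signal)                 # Beats object wrapping beat/downbeat times
print(beats.beat_times[:8])             # beat times, in seconds
print(beats.downbeat_times[:4])         # downbeat times, in seconds

beats.save(Path("beats_out"))           # writes beats_out/beats.json
segments = beats.beat_segments(signal)  # per-beat TimeSegments, with downbeat flags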
vampnet/interface.py (CHANGED)

@@ -3,14 +3,15 @@ from pathlib import Path
 import math
 
 import torch
+import numpy as np
 from audiotools import AudioSignal
 import tqdm
 
 from .modules.transformer import VampNet
+from .beats import WaveBeat
 from lac.model.lac import LAC
 
 
-
 def signal_concat(
     audio_signals: list,
 ):
@@ -50,7 +51,10 @@ class Interface:
 
     def s2t(self, seconds: float):
         """seconds to tokens"""
-        return math.ceil(seconds * self.codec.sample_rate / self.codec.hop_length)
+        if isinstance(seconds, np.ndarray):
+            return np.ceil(seconds * self.codec.sample_rate / self.codec.hop_length)
+        else:
+            return math.ceil(seconds * self.codec.sample_rate / self.codec.hop_length)
 
     def s2t2s(self, seconds: float):
         """seconds to tokens to seconds"""
@@ -83,12 +87,85 @@ class Interface:
             .ensure_max_of_audio(1.0)
         )
         return signal
+
     @torch.inference_mode()
     def encode(self, signal: AudioSignal):
         signal = self.preprocess(signal).to(self.device)
         z = self.codec.encode(signal.samples, signal.sample_rate)["codes"]
         return z
 
+    def make_beat_mask(self,
+        signal: AudioSignal,
+        before_beat_s: float = 0.1,
+        after_beat_s: float = 0.1,
+        mask_downbeats: bool = True,
+        mask_upbeats: bool = True,
+        downbeat_downsample_factor: int = None,
+        beat_downsample_factor: int = None,
+        dropout: float = 0.7,
+        invert: bool = True,
+    ):
+        """make a beat-synced mask. that is, make a mask that
+        places 1s at and around the beat, and 0s everywhere else.
+        """
+        assert hasattr(self, "beat_tracker"), "No beat tracker loaded"
+
+        # get the beat times
+        beats, downbeats = self.beat_tracker.extract_beats(signal)
+
+        # get the beat indices in z
+        beats_z, downbeats_z = self.s2t(beats), self.s2t(downbeats)
+
+        # remove downbeats from beats
+        beats_z = torch.tensor(beats_z)[~torch.isin(torch.tensor(beats_z), torch.tensor(downbeats_z))]
+        beats_z = beats_z.tolist()
+        downbeats_z = downbeats_z.tolist()
+
+        # make the mask
+        seq_len = self.s2t(signal.duration)
+        mask = torch.zeros(seq_len, device=self.device)
+
+        mask_b4 = self.s2t(before_beat_s)
+        mask_after = self.s2t(after_beat_s)
+
+        if beat_downsample_factor is not None:
+            if beat_downsample_factor < 1:
+                raise ValueError("beat_downsample_factor must be >= 1 or None")
+        else:
+            beat_downsample_factor = 1
+
+        if downbeat_downsample_factor is not None:
+            if downbeat_downsample_factor < 1:
+                raise ValueError("downbeat_downsample_factor must be >= 1 or None")
+        else:
+            downbeat_downsample_factor = 1
+
+        beats_z = beats_z[::beat_downsample_factor]
+        downbeats_z = downbeats_z[::downbeat_downsample_factor]
+
+        if mask_upbeats:
+            for beat_idx in beats_z:
+                _slice = int(beat_idx - mask_b4), int(beat_idx + mask_after)
+                num_steps = mask[_slice[0]:_slice[1]].shape[0]
+                _m = torch.ones(num_steps, device=self.device)
+                _m = torch.nn.functional.dropout(_m, p=dropout)
+
+                mask[_slice[0]:_slice[1]] = _m
+
+        if mask_downbeats:
+            for downbeat_idx in downbeats_z:
+                _slice = int(downbeat_idx - mask_b4), int(downbeat_idx + mask_after)
+                num_steps = mask[_slice[0]:_slice[1]].shape[0]
+                _m = torch.ones(num_steps, device=self.device)
+                _m = torch.nn.functional.dropout(_m, p=dropout)
+
+                mask[_slice[0]:_slice[1]] = _m
+
+        if invert:
+            mask = 1 - mask
+
+        return mask[None, None, :].bool().long()
+
     def coarse_to_fine(
         self,
         coarse_z: torch.Tensor,
@@ -231,7 +308,9 @@ class Interface:
         downsample_factor: int = None,
         intensity: float = 1.0,
         debug=False,
-        swap_prefix_suffix=False,
+        swap_prefix_suffix=False,
+        ext_mask=None,
+        verbose=False,
         **kwargs
     ):
         z = self.encode(signal)
@@ -258,14 +337,16 @@ class Interface:
 
         _cz = cz.clone()
         cz_mask = None
-        for _ in range(num_vamps):
+        range_fn = tqdm.trange if verbose else range
+        for _ in range_fn(num_vamps):
             # add noise
             cz_masked, cz_mask = self.coarse.add_noise(
                 _cz, r=1.0-intensity,
                 n_prefix=n_prefix,
                 n_suffix=n_suffix,
                 downsample_factor=downsample_factor,
-                mask=cz_mask
+                mask=cz_mask,
+                ext_mask=ext_mask
             )
             if debug:
                 print("tokens to infer")
@@ -366,8 +447,9 @@ class Interface:
     def variation(
         self,
         signal: AudioSignal,
-        overlap_hop_ratio: float = 1.0, # TODO: should this be fixed to 1.0? or should we overlap and replace instead of overlap add
         verbose: bool = False,
+        beat_mask: bool = False,
+        beat_mask_kwargs: dict = {},
         **kwargs
     ):
         signal = signal.clone()
@@ -380,6 +462,9 @@ class Interface:
             math.ceil(signal.duration / self.coarse.chunk_size_s)
             * self.coarse.chunk_size_s
         )
+        # eventually we DO want overlap, but we want overlap-replace not
+        # overlap-add
+        overlap_hop_ratio = 1.0
         hop_duration = self.coarse.chunk_size_s * overlap_hop_ratio
         original_length = signal.length
 
@@ -398,10 +483,18 @@ class Interface:
                 signal.samples[i,...], signal.sample_rate
             )
            sig.to(self.device)
+
+            if beat_mask:
+                ext_mask = self.make_beat_mask(sig, **beat_mask_kwargs)
+            else:
+                ext_mask = None
+
             out_z = self.coarse_vamp_v2(
                 sig,
                 num_vamps=1,
                 swap_prefix_suffix=False,
+                ext_mask=ext_mask,
+                verbose=verbose,
                 **kwargs
             )
             if self.c2f is not None:
@@ -415,7 +508,6 @@ class Interface:
         output.truncate_samples(original_length)
         return output
 
-
     # create a loop of a single region with variations
     # TODO: this would work nicer if we could trim at the beat
    # otherwise the model has to awkwardly fill up space that won't match
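
make_beat_mask returns a (1, 1, seq) long tensor, which coarse_vamp_v2 hands to add_noise as ext_mask. The snippet below is a standalone mirror of that construction on made-up beat positions (dropout omitted), showing the output shape and the effect of invert; it is an illustration, not the Interface method itself.

import torch

seq_len = 20
beats_z = [5, 10, 15]          # hypothetical beat positions, in token frames
mask_b4, mask_after = 1, 1     # window of one frame on each side of a beat

mask = torch.zeros(seq_len)
for b in beats_z:
    mask[max(b - mask_b4, 0): b + mask_after] = 1.0   # 1s at and around each beat

# invert=True flips the convention to match add_noise, where 1 means
# "replace with noise" and 0 means "keep": frames near beats become 0.
mask = 1 - mask
mask = mask[None, None, :].bool().long()              # (1, 1, seq), satisfying ext_mask.ndim == 3
print(mask.shape)
print(mask[0, 0])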
vampnet/modules/base.py (CHANGED)

@@ -31,17 +31,13 @@ class VampBase(at.ml.BaseModel):
     def forward(self, x: torch.Tensor, r: torch.Tensor):
         raise NotImplementedError
 
-    # TODO: add a beat tracking method
-    # that uses a beat tracking model to find beat positions
-    # and then unmask the codes in those poisitions (with some width)
-    # and drop them out with some randomness
-    # and have the option to DONT drop out downbeats for
     def add_noise(
         self,
         x: torch.Tensor,
         r: torch.Tensor,
         random_x: Optional[torch.Tensor] = None,
         mask: Optional[torch.Tensor] = None,
+        ext_mask: Optional[torch.Tensor] = None,
         n_prefix: Optional[torch.Tensor] = None,
         n_suffix: Optional[torch.Tensor] = None,
         downsample_factor: Optional[int] = None,
@@ -99,6 +95,11 @@ class VampBase(at.ml.BaseModel):
         else:
             raise ValueError(f"invalid noise mode {self.noise_mode}")
 
+        # add the external mask if we were given one
+        if ext_mask is not None:
+            assert ext_mask.ndim == 3, "mask must be (batch, n_codebooks, seq)"
+            mask = (mask * ext_mask).bool().long()
+
         x = x * (1 - mask) + random_x * mask
         return x, mask
 
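
A toy check (not the VampBase class) of how the new ext_mask folds into the sampled mask: wherever ext_mask is 0 the combined mask is forced to 0, so those token positions keep their original values regardless of the noise schedule.

import torch

torch.manual_seed(0)
x = torch.arange(10).reshape(1, 1, 10)          # original tokens: (batch, n_codebooks, seq)
random_x = torch.full_like(x, -1)               # stand-in for the noise tokens
mask = (torch.rand(1, 1, 10) < 0.8).long()      # schedule-sampled mask: 1 = replace
ext_mask = torch.ones(1, 1, 10, dtype=torch.long)
ext_mask[..., 3:6] = 0                          # e.g. a beat-synced window to protect

mask = (mask * ext_mask).bool().long()          # same combination as in add_noise
out = x * (1 - mask) + random_x * mask
print(out)                                      # positions 3..5 always keep x's values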