import torch
import torchmetrics
import pytorch_lightning as pl
from torch import Tensor, nn
from torchaudio.models import HDemucs
from audio_diffusion_pytorch import DiffusionModel
from auraloss.time import SISDRLoss
from auraloss.freq import MultiResolutionSTFTLoss
from umx.openunmix.model import OpenUnmix, Separator
from remfx.utils import FADLoss, spectrogram, causal_crop
from remfx.tcn import TCN
import asteroid
class RemFX(pl.LightningModule):
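    """LightningModule wrapper around an effect-removal network.

    Handles optimizer/scheduler setup, the shared train/valid/test step, and
    logging of SI-SDR, multi-resolution STFT, and (test only) FAD metrics.
    """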
def __init__(
self,
lr: float,
lr_beta1: float,
lr_beta2: float,
lr_eps: float,
lr_weight_decay: float,
sample_rate: float,
network: nn.Module,
):
super().__init__()
self.lr = lr
self.lr_beta1 = lr_beta1
self.lr_beta2 = lr_beta2
self.lr_eps = lr_eps
self.lr_weight_decay = lr_weight_decay
self.sample_rate = sample_rate
self.model = network
self.metrics = nn.ModuleDict(
{
"SISDR": SISDRLoss(),
"STFT": MultiResolutionSTFTLoss(),
"FAD": FADLoss(sample_rate=sample_rate),
}
)
        # Flag so that input vs. output audio is only logged for the first batch
self.log_train_audio = True
@property
def device(self):
return next(self.model.parameters()).device
def configure_optimizers(self):
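        """AdamW plus a MultiStepLR schedule that multiplies the learning rate
        by 0.1 at 80% and 95% of the total training steps."""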
optimizer = torch.optim.AdamW(
list(self.model.parameters()),
lr=self.lr,
betas=(self.lr_beta1, self.lr_beta2),
eps=self.lr_eps,
weight_decay=self.lr_weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
            [int(0.8 * self.trainer.max_steps), int(0.95 * self.trainer.max_steps)],
gamma=0.1,
)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": lr_scheduler,
"monitor": "val_loss",
"interval": "step",
"frequency": 1,
},
}
def training_step(self, batch, batch_idx):
return self.common_step(batch, batch_idx, mode="train")
def validation_step(self, batch, batch_idx):
return self.common_step(batch, batch_idx, mode="valid")
def test_step(self, batch, batch_idx):
return self.common_step(batch, batch_idx, mode="test")
def common_step(self, batch, batch_idx, mode: str = "train"):
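        """Shared step for train/valid/test: run the model, crop the target to the
        output length if needed, and log the loss and audio metrics."""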
x, y, _, _ = batch # x, y = (B, C, T), (B, C, T)
loss, output = self.model((x, y))
# Crop target to match output
if output.shape[-1] < y.shape[-1]:
y = causal_crop(y, output.shape[-1])
self.log(f"{mode}_loss", loss)
# Metric logging
with torch.no_grad():
for metric in self.metrics:
                # SISDRLoss returns the negative SI-SDR, so flip the sign when logging
if metric == "SISDR":
negate = -1
else:
negate = 1
                # Only log FAD on the test set
if metric == "FAD" and mode != "test":
continue
self.log(
f"{mode}_{metric}",
negate * self.metrics[metric](output, y),
on_step=False,
on_epoch=True,
logger=True,
prog_bar=True,
sync_dist=True,
)
return loss
class OpenUnmixModel(nn.Module):
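    """Open-Unmix (umx) baseline operating on magnitude spectrograms; inference goes
    through the umx Separator to return a time-domain estimate."""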
def __init__(
self,
n_fft: int = 2048,
hop_length: int = 512,
n_channels: int = 1,
alpha: float = 0.3,
sample_rate: int = 22050,
):
super().__init__()
self.n_channels = n_channels
self.n_fft = n_fft
self.hop_length = hop_length
self.alpha = alpha
window = torch.hann_window(n_fft)
self.register_buffer("window", window)
self.num_bins = self.n_fft // 2 + 1
self.sample_rate = sample_rate
self.model = OpenUnmix(
nb_channels=self.n_channels,
nb_bins=self.num_bins,
)
self.separator = Separator(
target_models={"other": self.model},
nb_channels=self.n_channels,
sample_rate=self.sample_rate,
n_fft=self.n_fft,
n_hop=self.hop_length,
)
self.mrstftloss = MultiResolutionSTFTLoss(
n_bins=self.num_bins, sample_rate=self.sample_rate
)
self.l1loss = nn.L1Loss()
def forward(self, batch):
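        """Return (loss, separated waveform); the loss is multi-resolution STFT plus
        L1 on the separator output, with the L1 term weighted by 100."""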
x, target = batch
X = spectrogram(x, self.window, self.n_fft, self.hop_length, self.alpha)
Y = self.model(X)
sep_out = self.separator(x).squeeze(1)
loss = self.mrstftloss(sep_out, target) + self.l1loss(sep_out, target) * 100
return loss, sep_out
def sample(self, x: Tensor) -> Tensor:
return self.separator(x).squeeze(1)
class DemucsModel(nn.Module):
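    """Hybrid Demucs (torchaudio) baseline trained with MR-STFT + 100 * L1 loss."""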
def __init__(self, sample_rate, **kwargs) -> None:
super().__init__()
self.model = HDemucs(**kwargs)
self.num_bins = kwargs["nfft"] // 2 + 1
self.mrstftloss = MultiResolutionSTFTLoss(
n_bins=self.num_bins, sample_rate=sample_rate
)
self.l1loss = nn.L1Loss()
def forward(self, batch):
x, target = batch
output = self.model(x).squeeze(1)
loss = self.mrstftloss(output, target) + self.l1loss(output, target) * 100
return loss, output
def sample(self, x: Tensor) -> Tensor:
return self.model(x).squeeze(1)
class DiffusionGenerationModel(nn.Module):
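    """Diffusion baseline from audio_diffusion_pytorch; forward returns the diffusion
    training loss together with a sampled output."""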
def __init__(self, n_channels: int = 1):
super().__init__()
self.model = DiffusionModel(in_channels=n_channels)
def forward(self, batch):
x, target = batch
sampled_out = self.model.sample(x)
return self.model(x), sampled_out
def sample(self, x: Tensor, num_steps: int = 10) -> Tensor:
noise = torch.randn(x.shape).to(x)
return self.model.sample(noise, num_steps=num_steps)
class DPTNetModel(nn.Module):
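    """DPTNet baseline (asteroid) operating directly on waveforms, trained with
    MR-STFT + 100 * L1 loss."""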
def __init__(self, sample_rate, num_bins, **kwargs):
super().__init__()
self.model = asteroid.models.dptnet.DPTNet(**kwargs)
self.num_bins = num_bins
self.mrstftloss = MultiResolutionSTFTLoss(
n_bins=self.num_bins, sample_rate=sample_rate
)
self.l1loss = nn.L1Loss()
def forward(self, batch):
x, target = batch
output = self.model(x.squeeze(1))
loss = self.mrstftloss(output, target) + self.l1loss(output, target) * 100
return loss, output
def sample(self, x: Tensor) -> Tensor:
return self.model(x.squeeze(1))
class DCUNetModel(nn.Module):
    def __init__(self, sample_rate, num_bins, **kwargs):
super().__init__()
self.model = asteroid.models.DCUNet(**kwargs)
self.mrstftloss = MultiResolutionSTFTLoss(
n_bins=num_bins, sample_rate=sample_rate
)
self.l1loss = nn.L1Loss()
def forward(self, batch):
x, target = batch
output = self.model(x.squeeze(1)) # B x T
# Crop target to match output
if output.shape[-1] < target.shape[-1]:
target = causal_crop(target, output.shape[-1])
loss = self.mrstftloss(output, target) + self.l1loss(output, target) * 100
return loss, output
def sample(self, x: Tensor) -> Tensor:
output = self.model(x.squeeze(1)) # B x T
return output
class TCNModel(nn.Module):
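    """Causal TCN baseline; targets are causally cropped to the output length before
    computing the MR-STFT + 100 * L1 loss."""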
def __init__(self, sample_rate, num_bins, **kwargs):
super().__init__()
self.model = TCN(**kwargs)
self.mrstftloss = MultiResolutionSTFTLoss(
n_bins=num_bins, sample_rate=sample_rate
)
self.l1loss = nn.L1Loss()
def forward(self, batch):
x, target = batch
output = self.model(x) # B x 1 x T
# Crop target to match output
if output.shape[-1] < target.shape[-1]:
target = causal_crop(target, output.shape[-1])
loss = self.mrstftloss(output, target) + self.l1loss(output, target) * 100
return loss, output
def sample(self, x: Tensor) -> Tensor:
output = self.model(x) # B x 1 x T
return output
class FXClassifier(pl.LightningModule):
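    """LightningModule that classifies the effect label of the input audio, trained
    with cross-entropy and evaluated with retrieval average precision (mAP)."""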
def __init__(
self,
lr: float,
lr_weight_decay: float,
sample_rate: float,
network: nn.Module,
):
super().__init__()
self.lr = lr
self.lr_weight_decay = lr_weight_decay
self.sample_rate = sample_rate
self.network = network
def forward(self, x: torch.Tensor):
return self.network(x)
def common_step(self, batch, batch_idx, mode: str = "train"):
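        """Shared step: cross-entropy between the predicted and dry labels, plus
        retrieval mAP, logged per step and per epoch."""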
x, y, dry_label, wet_label = batch
pred_label = self.network(x)
loss = nn.functional.cross_entropy(pred_label, dry_label)
self.log(
f"{mode}_loss",
loss,
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
f"{mode}_mAP",
torchmetrics.functional.retrieval_average_precision(
pred_label, dry_label.long()
),
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
return loss
def training_step(self, batch, batch_idx):
return self.common_step(batch, batch_idx, mode="train")
def validation_step(self, batch, batch_idx):
return self.common_step(batch, batch_idx, mode="valid")
def test_step(self, batch, batch_idx):
return self.common_step(batch, batch_idx, mode="test")
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.network.parameters(),
lr=self.lr,
weight_decay=self.lr_weight_decay,
)
return optimizer