Update app.py
app.py CHANGED
@@ -3,7 +3,6 @@ import torch
 import torchaudio
 from transformers import MusicgenForConditionalGeneration, MusicgenProcessor
 
-# Load melody-capable model
 model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-melody", torch_dtype=torch.float32)
 processor = MusicgenProcessor.from_pretrained("facebook/musicgen-melody")
 
@@ -11,24 +10,26 @@ def generate_music(prompt, melody):
     if melody is None:
         return None
 
-    # Load melody
+    # Load and resample melody to 32kHz
     melody_waveform, melody_sr = torchaudio.load(melody)
-    if melody_sr !=
-        resampler = torchaudio.transforms.Resample(orig_freq=melody_sr, new_freq=
+    if melody_sr != 32000:
+        resampler = torchaudio.transforms.Resample(orig_freq=melody_sr, new_freq=32000)
         melody_waveform = resampler(melody_waveform)
 
     # Trim or pad to 30 seconds
-    melody_waveform = melody_waveform[:, :
+    melody_waveform = melody_waveform[:, :32000 * 30]
 
-
+    # Run the model
+    inputs = processor(audio=melody_waveform, sampling_rate=32000, text=[prompt], return_tensors="pt")
     outputs = model.generate(**inputs, max_new_tokens=1024)
+
     audio_array = outputs[0].cpu().numpy()
     return (audio_array, model.config.audio_encoder.sampling_rate)
 
 demo = gr.Interface(
     fn=generate_music,
     inputs=[
-        gr.Textbox(label="Prompt", placeholder="e.g.,
+        gr.Textbox(label="Prompt", placeholder="e.g., mellow lofi beat with piano"),
         gr.Audio(source="upload", type="filepath", label="Melody Input (WAV or MP3)")
     ],
     outputs=gr.Audio(label="Generated Track"),
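A note on the new generate_music body. Despite the "Trim or pad to 30 seconds" comment, the slice only trims, so a melody shorter than 30 seconds passes through unpadded. The function also returns (data, sample_rate) while gr.Audio expects a (sample_rate, data) tuple, and MusicGen's generate output has shape (batch, channels, samples), so outputs[0] is still 2-D. Below is a minimal sketch addressing all three points; it assumes app.py imports gradio as gr and torch on the lines above this diff, that the processor's feature extractor prefers mono numpy input, and TARGET_SR / TARGET_LEN are illustrative names not present in the commit:

import torch
import torch.nn.functional as F
import torchaudio

TARGET_SR = 32000            # musicgen-melody conditions on 32 kHz audio
TARGET_LEN = TARGET_SR * 30  # 30-second conditioning window

def generate_music(prompt, melody):
    # model and processor as loaded at the top of app.py
    if melody is None:
        return None

    # Load and resample the melody to 32 kHz
    melody_waveform, melody_sr = torchaudio.load(melody)
    if melody_sr != TARGET_SR:
        melody_waveform = torchaudio.transforms.Resample(orig_freq=melody_sr, new_freq=TARGET_SR)(melody_waveform)

    # Trim, then right-pad with silence so short clips also reach 30 s
    melody_waveform = melody_waveform[:, :TARGET_LEN]
    if melody_waveform.shape[-1] < TARGET_LEN:
        melody_waveform = F.pad(melody_waveform, (0, TARGET_LEN - melody_waveform.shape[-1]))

    # Downmix to mono numpy before handing it to the feature extractor
    inputs = processor(
        audio=melody_waveform.mean(dim=0).numpy(),
        sampling_rate=TARGET_SR,
        text=[prompt],
        return_tensors="pt",
    )
    outputs = model.generate(**inputs, max_new_tokens=1024)

    # (batch, channels, samples) -> 1-D array; Gradio wants (sample_rate, data)
    audio_array = outputs[0, 0].cpu().numpy()
    return (model.config.audio_encoder.sampling_rate, audio_array)

Separately, if the Space pins Gradio 4.x, gr.Audio(source="upload", ...) raises a TypeError at startup (the source keyword was replaced by sources=["upload"]), which alone could account for the Space's runtime-error status. It may also be worth confirming that the facebook/musicgen-melody checkpoint loads with the plain Musicgen classes; recent transformers releases expose dedicated MusicgenMelodyForConditionalGeneration / MusicgenMelodyProcessor classes for it.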