import gradio as gr
import torch
import torchaudio
from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration

# facebook/musicgen-melody is a MusicgenMelody checkpoint, so it needs the
# MusicgenMelody model class and its matching processor; the plain Musicgen
# classes do not load this checkpoint correctly.
model = MusicgenMelodyForConditionalGeneration.from_pretrained(
    "facebook/musicgen-melody", torch_dtype=torch.float32
)
processor = AutoProcessor.from_pretrained("facebook/musicgen-melody")
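
# Assumption: the Space's requirements.txt provides recent releases of gradio,
# torch, and torchaudio, plus a transformers version new enough to include the
# MusicgenMelody classes used above.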
def generate_music(prompt, melody):
    if melody is None:
        return None
    # Load the uploaded melody and resample it to the 32 kHz rate MusicGen expects
    melody_waveform, melody_sr = torchaudio.load(melody)
    if melody_sr != 32000:
        resampler = torchaudio.transforms.Resample(orig_freq=melody_sr, new_freq=32000)
        melody_waveform = resampler(melody_waveform)
    # Down-mix to mono and trim the conditioning audio to at most 30 seconds
    melody_waveform = melody_waveform.mean(dim=0)[: 32000 * 30]
    # Build model inputs from the melody waveform and the text prompt
    inputs = processor(
        audio=melody_waveform.numpy(),
        sampling_rate=32000,
        text=[prompt],
        padding=True,
        return_tensors="pt",
    )
    outputs = model.generate(**inputs, do_sample=True, max_new_tokens=1024)
    # generate() returns audio of shape (batch, channels, samples); take the first mono waveform
    audio_array = outputs[0, 0].cpu().numpy()
    # gr.Audio expects a (sample_rate, samples) tuple for numpy output
    return (model.config.audio_encoder.sampling_rate, audio_array)
demo = gr.Interface(
    fn=generate_music,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="e.g., mellow lofi beat with piano"),
        # Gradio 4+ replaced the old `source` argument with `sources` (a list)
        gr.Audio(sources=["upload"], type="filepath", label="Melody Input (WAV or MP3)"),
    ],
    outputs=gr.Audio(label="Generated Track"),
    title="🎵 MusicGen-Melody AI Generator",
    description="Upload a melody and describe the vibe. Generates music using Meta's MusicGen-Melody model.",
)

demo.launch()