Commit 54bd1e6
Parent(s): 935c799
adding info to stuff
app.py CHANGED
@@ -167,20 +167,51 @@ def generate_music(seed, use_chords, chord_progression, prompt_duration, musicge
 # Check if CUDA is available
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Define the expandable sections
+musiclang_blurb = """
+## MusicLang
+MusicLang is a controllable AI MIDI model. It can generate MIDI sequences based on user-provided parameters, or unconditionally.
+- [musiclang github](https://github.com/MusicLang/musiclang_predict)
+- [musiclang huggingface space](https://huggingface.co/spaces/musiclang/musiclang-predict)
+"""
+
+musicgen_blurb = """
+## musicgen
+musicgen is a transformer-based music model that generates audio. It can also do something called a continuation, which was initially meant to extend musicgen outputs beyond 30 seconds. it can be used with any input audio to produce surprising results.
+- [musicgen github](https://github.com/facebookresearch/audiocraft)
+"""
+
+finetunes_blurb = """
+## Fine-tuned Models
+the fine-tunes hosted on the huggingface hub are provided collectively by the musicgen discord community. thanks to vanya, mj, hoenn, and of course, lyra.
+- [musicgen discord](https://discord.gg/93kX8rGZ)
+- [fine-tuning colab notebook by lyra](https://colab.research.google.com/drive/13tbcC3A42KlaUZ21qvUXd25SFLu8WIvb)
+"""
+
+# Create the Gradio interface
+with gr.Blocks() as iface:
+    gr.Markdown("# the-slot machine")
+    gr.Markdown("two ai's jamming. warning: outputs will be very strange, likely stupid, and possibly rad.")
+    gr.Markdown("this is a musical slot machine. using musiclang, we get a midi output. then, we let a musicgen model continue, semi-randomly, from different sections of the midi track. the slot machine combines em all at the end into something very bizarre. pick a number for the seed between 1 and 10k, or leave it blank to unlock the full rnjesus powers. if you wanna be lame, you can control the chord progression, prompt duration, musicgen model, number of iterations, and BPM.")
+
+    with gr.Accordion("More Info", open=False):
+        gr.Markdown(musiclang_blurb)
+        gr.Markdown(musicgen_blurb)
+        gr.Markdown(finetunes_blurb)
+
+    with gr.Row():
+        with gr.Column():
+            seed = gr.Textbox(label="Seed (leave blank for random)", value="")
+            use_chords = gr.Checkbox(label="Control Chord Progression", value=False)
+            chord_progression = gr.Textbox(label="Chord Progression (e.g., Am CM Dm E7 Am)", visible=True)
+            prompt_duration = gr.Dropdown(label="Prompt Duration (seconds)", choices=list(range(1, 11)), value=7)
+            musicgen_model = gr.Textbox(label="MusicGen Model", value="thepatch/vanya_ai_dnb_0.1")
+            num_iterations = gr.Slider(label="Number of Iterations", minimum=1, maximum=10, step=1, value=3)
+            bpm = gr.Slider(label="BPM", minimum=60, maximum=200, step=1, value=140)
+            generate_button = gr.Button("Generate Music")
+        with gr.Column():
+            output_audio = gr.Audio(label="Generated Music")
+
+    generate_button.click(generate_music, inputs=[seed, use_chords, chord_progression, prompt_duration, musicgen_model, num_iterations, bpm], outputs=output_audio)
 
 iface.launch()
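For context on the "continuation" idea mentioned in the musicgen_blurb added above: a minimal sketch of a single continuation call with audiocraft's MusicGen API. This is not code from the Space's app.py; the input file name, prompt length, and model choice are illustrative.

```python
import torchaudio
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

# Load a MusicGen checkpoint; the Space defaults to a community fine-tune
# ("thepatch/vanya_ai_dnb_0.1"), but the official small model is used here for the sketch.
model = MusicGen.get_pretrained("facebook/musicgen-small")
model.set_generation_params(duration=30)  # total output length in seconds

# Any audio can serve as the prompt; take the first 7 seconds of a file
# (mirroring the Space's default prompt_duration of 7). "input.wav" is hypothetical.
waveform, sample_rate = torchaudio.load("input.wav")
prompt = waveform[..., : 7 * sample_rate]

# generate_continuation extends the prompt instead of generating from scratch.
continuation = model.generate_continuation(prompt, sample_rate, progress=True)

# Write the result; audiocraft resamples internally, so output is at model.sample_rate.
audio_write("continuation", continuation[0].cpu(), model.sample_rate)
```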
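And a rough sketch of the pipeline the interface description outlines: random windows of the MusicLang rendering are fed to MusicGen as continuation prompts, and the continuations are stitched together at the end. This only paraphrases the description; the actual generate_music in app.py may differ, and the function and argument names here are hypothetical.

```python
import random
import torch
from audiocraft.models import MusicGen

def slot_machine(midi_audio: torch.Tensor, sample_rate: int,
                 prompt_duration: int = 7, num_iterations: int = 3,
                 seed: int | None = None) -> torch.Tensor:
    """Pick random windows of the MusicLang-rendered audio, let MusicGen
    continue each one, and concatenate the continuations."""
    if seed is None:
        seed = random.randint(1, 10_000)  # blank seed -> "full rnjesus powers"
    random.seed(seed)
    torch.manual_seed(seed)

    model = MusicGen.get_pretrained("facebook/musicgen-small")
    model.set_generation_params(duration=30)

    window = prompt_duration * sample_rate
    pieces = []
    for _ in range(num_iterations):
        # Grab a random prompt_duration-second slice of the MIDI rendering.
        start = random.randint(0, max(0, midi_audio.shape[-1] - window))
        prompt = midi_audio[..., start:start + window]            # [C, T] slice
        out = model.generate_continuation(prompt, sample_rate)    # [B, C, T] at model.sample_rate
        pieces.append(out[0].cpu())
    # Combine all continuations into one long (and probably strange) track.
    return torch.cat(pieces, dim=-1)
```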