changed UI, FX params and sample prompts
app.py
CHANGED
@@ -1,21 +1,19 @@
 import streamlit as st
 import os
-import librosa
-import soundfile as sf
 import numpy as np
 from inference import generate_drum_kit
 from audio_utils import play_audio
 from fx import get_fx
 
 # Streamlit UI
-st.title("
+st.title("semantic spaces: kit generator")
 st.subheader("generate drum kits and audio effects with text prompts")
 st.write("uses publicly available samples from [freesound](https://zenodo.org/records/4687854) and [CLAP embeddings](https://github.com/LAION-AI/CLAP) for text-based querying")
 st.write("hint: turn audio effects on! try weird prompts!")
 
 with st.container(border=True):
     # User Inputs
-    prompt = st.text_input("Describe your drum kit:", "warm vintage
+    prompt = st.text_input("Describe your drum kit:", "warm vintage organic percussion")
     kit_size = st.slider("Number of sounds per instrument:", 1, 10, 4)
     use_fx = st.toggle("Apply audio effects?", value=True)
     if use_fx:
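Note: the inputs collected in this hunk (prompt, kit_size, use_fx) are presumably consumed further down in app.py, outside the diff context. A minimal sketch of that wiring, with the generate_drum_kit / get_fx / play_audio call signatures assumed rather than taken from the repo:

```python
# Hypothetical continuation of app.py (relies on the imports shown above).
# The real call signatures of generate_drum_kit(), get_fx() and play_audio()
# are not visible in this diff; treat these as placeholders.
fx_params = get_fx(prompt) if use_fx else None   # assumed: FX settings derived from the prompt

if st.button("Generate kit"):
    # assumed: returns a mapping like {"kick": [paths], "snare": [paths], ...}
    drum_kit = generate_drum_kit(prompt, kit_size)
    for instrument, samples in drum_kit.items():
        st.write(f"**{instrument}**")
        for sample_path in samples:
            play_audio(sample_path)              # assumed helper from audio_utils
```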
fx.py
CHANGED
@@ -41,7 +41,9 @@ def apply_fx(audio_path, params, write_wav=True, output_dir="processed_audio"):
         Bitcrush(bit_depth=params['bit_depth']),
         Reverb(room_size=params['reverb_size'], wet_level=params['reverb_wet'])
     ])
+
     processed_audio = board(audio, sr)
+
     if write_wav:
         # Determine output directory dynamically
         base_dir = os.path.dirname(os.path.dirname(audio_path))  # Get 'dataset' level
@@ -84,12 +86,12 @@ def get_params_dict(params_list):
 
 # Define parameter search space
 search_space = [
-    Real(
+    Real(4000, 20000, name="lowpass"),
     Real(50, 1000, name="highpass"),
     Real(0.0, 0.8, name="reverb_size"),
     Real(0.0, 1.0, name="reverb_wet"),
-    Real(0.0,
-    Real(
+    Real(0.0, 10.0, name="drive_db"),
+    Real(4.0, 32.0, name="bit_depth")
 ]
 
 ##### Main function #####
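The named Real dimensions match the keys read back in apply_fx (params['bit_depth'], params['reverb_size'], params['reverb_wet'], plus the new lowpass / drive_db entries). A minimal sketch of how a sampled parameter list might be mapped onto a Pedalboard chain, assuming skopt's Real dimensions and Pedalboard's stock LowpassFilter / HighpassFilter / Distortion plugins; the actual get_params_dict / apply_fx in fx.py may differ:

```python
from pedalboard import (
    Pedalboard, LowpassFilter, HighpassFilter, Distortion, Bitcrush, Reverb
)
from skopt.space import Real

# Same dimensions and order as the search_space added in this commit.
search_space = [
    Real(4000, 20000, name="lowpass"),
    Real(50, 1000, name="highpass"),
    Real(0.0, 0.8, name="reverb_size"),
    Real(0.0, 1.0, name="reverb_wet"),
    Real(0.0, 10.0, name="drive_db"),
    Real(4.0, 32.0, name="bit_depth"),
]

def params_list_to_dict(params_list):
    # Pair each sampled value with its dimension name, in search-space order.
    return {dim.name: value for dim, value in zip(search_space, params_list)}

def build_board(params):
    # Mirrors the Bitcrush/Reverb stages visible in apply_fx; the filter and
    # distortion stages are assumptions based on the new parameter names.
    return Pedalboard([
        LowpassFilter(cutoff_frequency_hz=params["lowpass"]),
        HighpassFilter(cutoff_frequency_hz=params["highpass"]),
        Distortion(drive_db=params["drive_db"]),
        Bitcrush(bit_depth=params["bit_depth"]),
        Reverb(room_size=params["reverb_size"], wet_level=params["reverb_wet"]),
    ])
```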