szili2011 committed on
Commit
b8fc09b
·
verified ·
1 Parent(s): 8568d0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -25
app.py CHANGED
@@ -8,6 +8,17 @@ import pyrubberband as rb
8
  import soundfile as sf
9
  from pedalboard import Pedalboard, Reverb
10
 
 
 
 
 
 
 
 
 
 
 
 
11
  # --- Musical Sequence Generation ---
12
 
13
  def generate_musical_sequence(mode="Scale", scale_name="Major", root_note="C4", num_notes=50):
@@ -19,14 +30,12 @@ def generate_musical_sequence(mode="Scale", scale_name="Major", root_note="C4",
19
  except Exception as e:
20
  raise ValueError(f"Invalid root note. Use scientific notation like 'C4'. Error: {e}")
21
 
22
- # Define scale intervals
23
  intervals = {
24
  "Chromatic": [1], "Major": [2, 2, 1, 2, 2, 2, 1], "Natural Minor": [2, 1, 2, 2, 1, 2, 2],
25
  "Harmonic Minor": [2, 1, 2, 2, 1, 3, 1], "Pentatonic Major": [2, 2, 3, 2, 3]
26
  }
27
  scale_intervals = intervals.get(scale_name, intervals["Chromatic"])
28
 
29
- # Generate the base scale notes
30
  scale_notes = [current_midi]
31
  interval_index = 0
32
  while len(scale_notes) < num_notes:
@@ -38,11 +47,9 @@ def generate_musical_sequence(mode="Scale", scale_name="Major", root_note="C4",
38
  return scale_notes[:num_notes]
39
 
40
  if mode == "Arpeggio":
41
- # Extract chord tones (1st, 3rd, 5th) from the generated scale
42
  arpeggio_tones = [scale_notes[i] for i in [0, 2, 4]]
43
  if not arpeggio_tones: return []
44
 
45
- # Build the sequence by cycling through chord tones and octaves
46
  sequence = []
47
  octave_offset = 0
48
  tone_index = 0
@@ -89,15 +96,12 @@ def audio_orchestrator(
89
  if len(y) / sr < 0.1:
90
  raise gr.Error("Source audio is too short. Please use a file that is at least 0.1 seconds long.")
91
 
92
- # 1. Generate the target musical sequence (scale or arpeggio)
93
  progress(0.1, desc=f"Generating {mode}...")
94
  target_midi_notes = generate_musical_sequence(mode, scale_name, root_note, num_notes)
95
 
96
- # 2. Process each note in the sequence
97
  final_y = np.array([], dtype=np.float32)
98
  silence_samples = np.zeros(int(silence_s * sr), dtype=np.float32)
99
 
100
- all_rms = []
101
  source_segments = [get_random_segment(y, sr) for _ in range(num_notes)]
102
  all_rms = [rms for _, rms in source_segments if rms > 0]
103
  max_rms = max(all_rms) if all_rms else 1.0
@@ -107,30 +111,25 @@ def audio_orchestrator(
107
  note_name = librosa.midi_to_note(target_note)
108
  progress(0.2 + (i / num_notes) * 0.7, desc=f"Processing Note {i+1}/{num_notes}: {note_name}")
109
 
110
- # Time-stretch segment to the target note duration
111
  source_duration = len(segment_y) / sr
112
  stretch_rate = source_duration / note_duration_s if note_duration_s > 0 else 1
113
  stretched_y = rb.time_stretch(segment_y, sr, stretch_rate)
114
 
115
- # Pitch-shift the stretched segment
116
- source_pitch_hz = librosa.note_to_hz('C4') # Assume a neutral source pitch
117
  target_pitch_hz = librosa.midi_to_hz(target_note)
118
  pitch_shift_ratio = target_pitch_hz / source_pitch_hz
119
  shifted_y = rb.pitch_shift(stretched_y, sr, pitch_shift_ratio)
120
 
121
- # Apply dynamic expression based on original segment's volume
122
  volume_factor = (rms / max_rms) * 0.9 + 0.1
123
  shifted_y *= volume_factor
124
 
125
  final_y = np.concatenate((final_y, shifted_y, silence_samples))
126
 
127
- # 3. Apply final effects processing
128
  progress(0.95, desc="Applying final effects...")
129
  if reverb_mix > 0:
130
  board = Pedalboard([Reverb(room_size=reverb_room_size, wet_level=reverb_mix, dry_level=1.0 - reverb_mix)])
131
  final_y = board(final_y, sr)
132
 
133
- # 4. Export final audio
134
  output_dir = "generated_music"
135
  os.makedirs(output_dir, exist_ok=True)
136
  output_filename = f"{mode.lower()}_{scale_name.lower().replace(' ', '_')}_{root_note}.wav"
@@ -158,8 +157,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="teal", secondary_hue="orange"))
158
  mode = gr.Radio(["Scale", "Arpeggio"], label="Generation Mode", value="Arpeggio")
159
  scale_name = gr.Dropdown(["Major", "Natural Minor", "Harmonic Minor", "Pentatonic Major", "Chromatic"], label="Scale", value="Major")
160
 
161
- # FIX IS HERE: Converted the numpy array from librosa to a python list with .tolist()
162
- root_note_choices = librosa.midi_to_note(list(range(36, 73))).tolist() # C2 to C5
163
  root_note = gr.Dropdown(root_note_choices, label="Root Note", value="C4")
164
 
165
  num_notes = gr.Slider(10, 200, value=70, step=1, label="Number of Notes")
@@ -189,6 +187,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="teal", secondary_hue="orange"))
189
  )
190
 
191
  gr.Examples(
 
192
  examples=[
193
  ["example_vocal.wav", "Arpeggio", "Natural Minor", "A3", 80, 0.15, 0.1, 0.4, 0.7],
194
  ["example_drums.wav", "Scale", "Pentatonic Major", "C3", 50, 0.25, 0.0, 0.2, 0.8],
@@ -199,17 +198,9 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="teal", secondary_hue="orange"))
199
  note_duration_s, silence_s, reverb_mix, reverb_room_size
200
  ],
201
  outputs=[audio_output],
202
- cache_examples=True # Recommended for HF Spaces
 
203
  )
204
 
205
  if __name__ == "__main__":
206
- # Create dummy files for local testing of the examples
207
- # Check if files exist before creating them
208
- if not os.path.exists("example_vocal.wav"):
209
- sf.write("example_vocal.wav", np.random.randn(44100 * 2), 44100)
210
- if not os.path.exists("example_drums.wav"):
211
- sf.write("example_drums.wav", np.random.randn(44100 * 2), 44100)
212
- if not os.path.exists("example_synth.wav"):
213
- sf.write("example_synth.wav", np.random.randn(44100 * 2), 44100)
214
-
215
  demo.launch(debug=True)
 
8
  import soundfile as sf
9
  from pedalboard import Pedalboard, Reverb
10
 
11
+ # --- Create dummy files for examples. This runs on server startup. ---
12
+ # This is the key fix: This code is now in the global scope.
13
+ os.makedirs("generated_music", exist_ok=True)
14
+ if not os.path.exists("example_vocal.wav"):
15
+ sf.write("example_vocal.wav", np.random.randn(44100 * 2), 44100)
16
+ if not os.path.exists("example_drums.wav"):
17
+ sf.write("example_drums.wav", np.random.randn(44100 * 2), 44100)
18
+ if not os.path.exists("example_synth.wav"):
19
+ sf.write("example_synth.wav", np.random.randn(44100 * 2), 44100)
20
+
21
+
22
  # --- Musical Sequence Generation ---
23
 
24
  def generate_musical_sequence(mode="Scale", scale_name="Major", root_note="C4", num_notes=50):
 
30
  except Exception as e:
31
  raise ValueError(f"Invalid root note. Use scientific notation like 'C4'. Error: {e}")
32
 
 
33
  intervals = {
34
  "Chromatic": [1], "Major": [2, 2, 1, 2, 2, 2, 1], "Natural Minor": [2, 1, 2, 2, 1, 2, 2],
35
  "Harmonic Minor": [2, 1, 2, 2, 1, 3, 1], "Pentatonic Major": [2, 2, 3, 2, 3]
36
  }
37
  scale_intervals = intervals.get(scale_name, intervals["Chromatic"])
38
 
 
39
  scale_notes = [current_midi]
40
  interval_index = 0
41
  while len(scale_notes) < num_notes:
 
47
  return scale_notes[:num_notes]
48
 
49
  if mode == "Arpeggio":
 
50
  arpeggio_tones = [scale_notes[i] for i in [0, 2, 4]]
51
  if not arpeggio_tones: return []
52
 
 
53
  sequence = []
54
  octave_offset = 0
55
  tone_index = 0
 
96
  if len(y) / sr < 0.1:
97
  raise gr.Error("Source audio is too short. Please use a file that is at least 0.1 seconds long.")
98
 
 
99
  progress(0.1, desc=f"Generating {mode}...")
100
  target_midi_notes = generate_musical_sequence(mode, scale_name, root_note, num_notes)
101
 
 
102
  final_y = np.array([], dtype=np.float32)
103
  silence_samples = np.zeros(int(silence_s * sr), dtype=np.float32)
104
 
 
105
  source_segments = [get_random_segment(y, sr) for _ in range(num_notes)]
106
  all_rms = [rms for _, rms in source_segments if rms > 0]
107
  max_rms = max(all_rms) if all_rms else 1.0
 
111
  note_name = librosa.midi_to_note(target_note)
112
  progress(0.2 + (i / num_notes) * 0.7, desc=f"Processing Note {i+1}/{num_notes}: {note_name}")
113
 
 
114
  source_duration = len(segment_y) / sr
115
  stretch_rate = source_duration / note_duration_s if note_duration_s > 0 else 1
116
  stretched_y = rb.time_stretch(segment_y, sr, stretch_rate)
117
 
118
+ source_pitch_hz = librosa.note_to_hz('C4')
 
119
  target_pitch_hz = librosa.midi_to_hz(target_note)
120
  pitch_shift_ratio = target_pitch_hz / source_pitch_hz
121
  shifted_y = rb.pitch_shift(stretched_y, sr, pitch_shift_ratio)
122
 
 
123
  volume_factor = (rms / max_rms) * 0.9 + 0.1
124
  shifted_y *= volume_factor
125
 
126
  final_y = np.concatenate((final_y, shifted_y, silence_samples))
127
 
 
128
  progress(0.95, desc="Applying final effects...")
129
  if reverb_mix > 0:
130
  board = Pedalboard([Reverb(room_size=reverb_room_size, wet_level=reverb_mix, dry_level=1.0 - reverb_mix)])
131
  final_y = board(final_y, sr)
132
 
 
133
  output_dir = "generated_music"
134
  os.makedirs(output_dir, exist_ok=True)
135
  output_filename = f"{mode.lower()}_{scale_name.lower().replace(' ', '_')}_{root_note}.wav"
 
157
  mode = gr.Radio(["Scale", "Arpeggio"], label="Generation Mode", value="Arpeggio")
158
  scale_name = gr.Dropdown(["Major", "Natural Minor", "Harmonic Minor", "Pentatonic Major", "Chromatic"], label="Scale", value="Major")
159
 
160
+ root_note_choices = librosa.midi_to_note(list(range(36, 73))).tolist()
 
161
  root_note = gr.Dropdown(root_note_choices, label="Root Note", value="C4")
162
 
163
  num_notes = gr.Slider(10, 200, value=70, step=1, label="Number of Notes")
 
187
  )
188
 
189
  gr.Examples(
190
+ # FIX IS HERE: Using simple filenames because the files are now in the root directory.
191
  examples=[
192
  ["example_vocal.wav", "Arpeggio", "Natural Minor", "A3", 80, 0.15, 0.1, 0.4, 0.7],
193
  ["example_drums.wav", "Scale", "Pentatonic Major", "C3", 50, 0.25, 0.0, 0.2, 0.8],
 
198
  note_duration_s, silence_s, reverb_mix, reverb_room_size
199
  ],
200
  outputs=[audio_output],
201
+ fn=audio_orchestrator,
202
+ cache_examples=True
203
  )
204
 
205
  if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
206
  demo.launch(debug=True)