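"""Extract fixed-length clips centered on high-energy acoustic events.

For every audio file in INPUT_DIR: load it, band-pass filter it to the
2-8 kHz band, detect segments whose RMS energy rises above THRESHOLD_DB,
and export a TARGET_DURATION-second WAV clip centered on each event to
OUTPUT_DIR.
"""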
import os
from pathlib import Path
from typing import List, Tuple

import librosa
import numpy as np
from pydub import AudioSegment
from scipy.signal import butter, lfilter


INPUT_DIR = "./data/crudo/clips/5_segundos/no_cotorra"
OUTPUT_DIR = "./data/crudo/clips/3_segundos/no_cotorra"
THRESHOLD_DB = -25      # RMS threshold in dB (relative to the file's peak)
TARGET_DURATION = 3     # target clip length in seconds


def butter_bandpass(lowcut: float, highcut: float, fs: float, order: int = 5):
    """Design a Butterworth band-pass filter."""
    return butter(order, [lowcut, highcut], fs=fs, btype='band')


def butter_bandpass_filter(data: np.ndarray, lowcut: float, highcut: float, fs: float, order: int = 5) -> np.ndarray:
    """Apply a Butterworth band-pass filter to the data."""
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(b, a, data)
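
# Usage sketch for the filter helpers (hypothetical values, separate from the
# pipeline below): band-limit one second of white noise to the 2-8 kHz band.
#     fs = 44100
#     noise = np.random.default_rng(0).normal(size=fs)
#     banded = butter_bandpass_filter(noise, 2000, 8000, fs)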


class AudioProcessor:
    def __init__(self, audio_path: str, target_duration: float = 3):
        self.audio_path = audio_path
        self.target_duration = target_duration

        print(f"Loading: {audio_path}")
        # sr=None keeps the file's native sample rate.
        self.y, self.sr = librosa.load(audio_path, sr=None)
        self.duracion_total = librosa.get_duration(y=self.y, sr=self.sr)

        print("Applying band-pass filter (2 kHz - 8 kHz)...")
        # Clamp the upper cutoff below the Nyquist frequency so files with low
        # sample rates do not break the filter design.
        highcut = min(8000, 0.45 * self.sr)
        self.y_filtered = butter_bandpass_filter(self.y, 2000, highcut, self.sr, order=5)
        self.y_filtered = librosa.util.normalize(self.y_filtered)
        print("Audio loaded and preprocessed.")

    def detect_events(self, threshold_db: float, window_size: float = 0.02, min_duration: float = 0.1, merge_gap: float = 0.5) -> List[Tuple[float, float]]:
        """Return (start, end) times, in seconds, of segments whose RMS energy exceeds threshold_db."""
        # Frame-level RMS energy over ~20 ms windows with 50% overlap.
        window_length = int(window_size * self.sr)
        rms = librosa.feature.rms(y=self.y_filtered, frame_length=window_length, hop_length=window_length // 2)[0]
        times = librosa.frames_to_time(np.arange(len(rms)), sr=self.sr, hop_length=window_length // 2)
        rms_db = librosa.amplitude_to_db(rms, ref=np.max)

        # Scan the dB curve and record each run above the threshold as an event.
        events = []
        in_event = False
        event_start = -1

        for i, db in enumerate(rms_db):
            if db > threshold_db and not in_event:
                in_event = True
                event_start = times[i]
            elif db <= threshold_db and in_event:
                in_event = False
                event_end = times[i]
                if event_end - event_start >= min_duration:
                    events.append((event_start, event_end))

        # Close an event that is still open when the file ends.
        if in_event and self.duracion_total - event_start >= min_duration:
            events.append((event_start, self.duracion_total))

        # Merge events separated by less than merge_gap seconds.
        merged = []
        if events:
            cur_start, cur_end = events[0]
            for start, end in events[1:]:
                if start - cur_end < merge_gap:
                    cur_end = end
                else:
                    merged.append((cur_start, cur_end))
                    cur_start, cur_end = start, end
            merged.append((cur_start, cur_end))
        return merged
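
    # Hypothetical example: for a recording with calls near 1.2 s and 4.0 s,
    # detect_events(-25) might return [(1.05, 1.40), (3.92, 4.18)].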

    def generate_clips(self, threshold_db: float, output_dir: str) -> List[AudioSegment]:
        """Cut a clip of target_duration seconds centered on each event and export it as WAV."""
        events = self.detect_events(threshold_db)
        audio_pydub = AudioSegment.from_file(self.audio_path)
        clips = []
        last_center = -float('inf')

        os.makedirs(output_dir, exist_ok=True)
        base_name = Path(self.audio_path).stem

        for i, (start, end) in enumerate(events):
            center = (start + end) / 2
            # Skip events whose center is within one clip length of the last
            # exported clip, to avoid overlapping clips.
            if abs(center - last_center) < self.target_duration:
                continue

            # pydub indexes audio in milliseconds; clamp the clip to the file boundaries.
            clip_start = max(0, int((center - self.target_duration / 2) * 1000))
            clip_end = min(len(audio_pydub), int((center + self.target_duration / 2) * 1000))

            # Keep the clip only if clamping left it no more than one second
            # shorter than the target length.
            if (clip_end - clip_start) >= int((self.target_duration - 1) * 1000):
                clip = audio_pydub[clip_start:clip_end]
                clips.append(clip)
                last_center = center
                output_path = os.path.join(output_dir, f"{base_name}_clip_{i+1}.wav")
                clip.export(output_path, format="wav")
                print(f"Saved: {output_path} (center: {center:.2f}s)")
        return clips
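
    # Hypothetical example: an input "XC12345.mp3" whose third detected event
    # survives the spacing check is saved as "<output_dir>/XC12345_clip_3.wav".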


def process_directory(input_dir: str, output_dir: str, threshold_db: float = -25, target_duration: float = 3):
    """
    Process every audio file in a directory and extract clips.

    Args:
        input_dir: Directory containing the audio files
        output_dir: Directory where the clips will be saved
        threshold_db: Threshold in dB for event detection
        target_duration: Target duration of each clip in seconds
    """
    os.makedirs(output_dir, exist_ok=True)

    audio_extensions = {'.mp3', '.wav', '.m4a', '.flac', '.ogg'}
    audio_files = [f for f in os.listdir(input_dir)
                   if os.path.splitext(f)[1].lower() in audio_extensions]

    if not audio_files:
        print(f"No audio files found in {input_dir}")
        return

    print(f"Processing {len(audio_files)} audio files...")

    for audio_file in audio_files:
        input_path = os.path.join(input_dir, audio_file)
        print(f"\nProcessing: {audio_file}")

        # One unreadable file should not abort the whole batch.
        try:
            processor = AudioProcessor(input_path, target_duration)
            clips = processor.generate_clips(threshold_db, output_dir)
            print(f"Generated {len(clips)} clips from {audio_file}")
        except Exception as e:
            print(f"Error processing {audio_file}: {e}")


if __name__ == "__main__":
    process_directory(INPUT_DIR, OUTPUT_DIR, THRESHOLD_DB, TARGET_DURATION)