import os
import tempfile

from moviepy.editor import VideoFileClip, concatenate_videoclips
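

# Builds sport-specific highlight clips: scenes recognized as the requested
# sport are packed, in order, into clips of at most target_duration seconds.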
class ClipGenerator:
    def __init__(self, target_duration: int = 60, min_confidence: float = 0.9):
        self.target_duration = target_duration  # target duration in seconds
        self.min_confidence = min_confidence
        print(f"[DEBUG] Initialized ClipGenerator with target_duration={target_duration}s, min_confidence={min_confidence}")

    def _time_to_seconds(self, time_str: str) -> float:
        """Convert an 'HH:MM:SS.mmm' timestamp to seconds."""
        h, m, s = time_str.split(':')
        return int(h) * 3600 + int(m) * 60 + float(s)

    def _seconds_to_time(self, seconds: float) -> str:
        """Convert seconds to an 'HH:MM:SS.mmm' timestamp."""
        h = int(seconds // 3600)
        m = int((seconds % 3600) // 60)
        s = seconds % 60
        return f"{h:02d}:{m:02d}:{s:06.3f}"

    def generate_clips(self, video_path: str, scenes: list, sport_id: str) -> list:
        print(f"\n[DEBUG] Starting clip generation for {sport_id}")
        print(f"[DEBUG] Input scenes: {len(scenes)}")

        # Debug each scene before filtering
        for scene in scenes:
            print(f"[DEBUG] Pre-filter scene:")
            print(f" - Start: {scene['start']}")
            print(f" - End: {scene['end']}")
            print(f" - Sport: {scene['recognized_sport']}")
            print(f" - Confidence: {scene['confidence']}")
            print(f" - Would pass sport filter: {scene['recognized_sport'].lower() == sport_id.lower()}")
            print(f" - Would pass confidence filter: {scene['confidence'] >= self.min_confidence}")
        filtered_scenes = [
            scene for scene in scenes
            if scene["recognized_sport"].lower() == sport_id.lower()
            and scene["confidence"] >= self.min_confidence
        ]

        print(f"[DEBUG] Filtered scenes: {len(filtered_scenes)}")
        for scene in filtered_scenes:
            print(f"[DEBUG] Scene: {scene['start']} -> {scene['end']} (conf: {scene['confidence']:.2%}) sport_id: {sport_id}")

        if not filtered_scenes:
            print("[DEBUG] No valid scenes found after filtering")
            return []
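
        # Pack the filtered scenes, in order, into clips of at most
        # target_duration seconds. current_segments / current_duration track
        # the clip currently being assembled.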
        clips_data = []
        current_segments = []
        current_duration = 0
        clip_number = 1

        for scene in filtered_scenes:
            print(f"\n[DEBUG] Processing scene {scene['start']} -> {scene['end']}")
            scene_start = self._time_to_seconds(scene["start"])
            scene_end = self._time_to_seconds(scene["end"])
            scene_duration = scene_end - scene_start
            print(f"[DEBUG] Scene duration: {scene_duration:.2f}s")
            print(f"[DEBUG] Current accumulated duration: {current_duration:.2f}s")
            if current_duration + scene_duration > self.target_duration:
                remaining_duration = self.target_duration - current_duration
                print(f"[DEBUG] Scene would exceed target duration. Remaining space: {remaining_duration:.2f}s")

                if remaining_duration > 0:
                    segment_end = self._seconds_to_time(scene_start + remaining_duration)
                    current_segments.append({
                        "start": scene["start"],
                        "end": segment_end,
                        "confidence": scene["confidence"]
                    })
                    print(f"[DEBUG] Added partial segment: {scene['start']} -> {segment_end}")

                clip_data = self._create_clip(video_path, current_segments, clip_number)
                clips_data.append(clip_data)
                print(f"[DEBUG] Created clip {clip_number} with {len(current_segments)} segments")
                clip_number += 1
                current_segments = []
                current_duration = 0

                remaining_scene_duration = scene_duration - remaining_duration
                print(f"[DEBUG] Remaining scene duration to process: {remaining_scene_duration:.2f}s")
                while remaining_scene_duration > 0:
                    if remaining_scene_duration >= self.target_duration:
                        segment_start = self._seconds_to_time(scene_start + scene_duration - remaining_scene_duration)
                        segment_end = self._seconds_to_time(scene_start + scene_duration - remaining_scene_duration + self.target_duration)
                        segment = {
                            "start": segment_start,
                            "end": segment_end,
                            "confidence": scene["confidence"]
                        }
                        print(f"[DEBUG] Creating full clip from remaining: {segment_start} -> {segment_end}")
                        clip_data = self._create_clip(video_path, [segment], clip_number)
                        clips_data.append(clip_data)
                        clip_number += 1
                        remaining_scene_duration -= self.target_duration
                    else:
                        segment_start = self._seconds_to_time(scene_end - remaining_scene_duration)
                        current_segments = [{
                            "start": segment_start,
                            "end": scene["end"],
                            "confidence": scene["confidence"]
                        }]
                        print(f"[DEBUG] Keeping remainder for next clip: {segment_start} -> {scene['end']}")
                        current_duration = remaining_scene_duration
                        break
            else:
                current_segments.append({
                    "start": scene["start"],
                    "end": scene["end"],
                    "confidence": scene["confidence"]
                })
                current_duration += scene_duration
                print(f"[DEBUG] Added full scene to current clip. New duration: {current_duration:.2f}s")

        if current_segments:
            print(f"\n[DEBUG] Processing remaining segments")
            clip_data = self._create_clip(video_path, current_segments, clip_number)
            clips_data.append(clip_data)
            print(f"[DEBUG] Created final clip {clip_number} with {len(current_segments)} segments")

        print(f"\n[DEBUG] Generated {len(clips_data)} clips in total")
        return clips_data

    def _create_clip(self, video_path: str, segments: list, clip_number: int) -> dict:
        """Create a clip from the given segments."""
        total_duration = sum(
            self._time_to_seconds(segment["end"]) - self._time_to_seconds(segment["start"])
            for segment in segments
        )
        print(f"[DEBUG] Creating clip {clip_number}")
        print(f"[DEBUG] Total duration: {total_duration:.2f}s")
        # Create a subclip for each segment
        subclips = []
        video = VideoFileClip(video_path)
        for segment in segments:
            start_time = self._time_to_seconds(segment["start"])
            end_time = self._time_to_seconds(segment["end"])
            print(f"[DEBUG] Extracting segment: {segment['start']} -> {segment['end']}")
            subclip = video.subclip(start_time, end_time)
            subclips.append(subclip)

        # Concatenate all the subclips
        final_clip = concatenate_videoclips(subclips)

        # Create a temporary file for the clip; close the handle right away so
        # ffmpeg can write to the path (the file is kept because delete=False)
        temp_clip_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
        temp_clip_file.close()
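        # Encode the assembled clip to the temporary file (H.264 video, AAC audio)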
        final_clip.write_videofile(
            temp_clip_file.name,
            codec='libx264',
            audio_codec='aac',
            temp_audiofile='temp-audio.m4a',
            remove_temp=True
        )

        # Close the clips to free memory
        final_clip.close()
        for subclip in subclips:
            subclip.close()
        video.close()
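
        # Average confidence over the segments that make up this clip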
        total_confidence = sum(segment["confidence"] for segment in segments)
        avg_confidence = total_confidence / len(segments)

        return {
            "segments": segments,
            "clip_number": clip_number,
            "confidence": avg_confidence,
            "duration": total_duration,
            "file_path": temp_clip_file.name  # include the generated file's path
        }


if __name__ == "__main__":
    # Local test
    test_video_path = "test_video.mp4"  # replace with a real path
    test_scenes = [
        {
            "start": "00:00:00.000",
            "end": "00:00:30.000",
            "recognized_sport": "surf",
            "confidence": 0.95
        },
        {
            "start": "00:00:40.000",
            "end": "00:01:20.000",
            "recognized_sport": "surf",
            "confidence": 0.92
        },
        {
            "start": "00:01:30.000",
            "end": "00:02:00.000",
            "recognized_sport": "not_surf",
            "confidence": 0.88
        }
    ]

    clip_generator = ClipGenerator(target_duration=60, min_confidence=0.9)
    clips = clip_generator.generate_clips(test_video_path, test_scenes, "surf")
print("\nClips générés :") | |
for i, clip in enumerate(clips, 1): | |
print(f"\nClip {i}:") | |
print(f"Confidence: {clip['confidence']:.2%}") | |
print("Segments:") | |
for segment in clip['segments']: | |
print(f" {segment['start']} -> {segment['end']} (conf: {segment['confidence']:.2%})") |