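"""
Gradio app: scan an uploaded video with a Keras image classifier and stitch
the detected jumpscare moments into a single compilation clip.

Pipeline: sample frames at a fixed FPS -> classify each frame -> merge nearby
detections into segments -> cut and concatenate the segments with MoviePy.
"""
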
import cv2
import numpy as np
import tensorflow as tf
from moviepy import VideoFileClip, concatenate_videoclips
import gradio as gr
from tqdm import tqdm
import os
import logging
from datetime import datetime

# --- Configuration ---
MODEL_PATH = 'model.h5'
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Load Model ---
tf.get_logger().setLevel('ERROR')
model = tf.keras.models.load_model(MODEL_PATH)
tf.get_logger().setLevel('INFO')
logging.info("AI model loaded successfully.")

JUMPSCARE_CLASS_INDEX = 0
logging.info(f"Using class index {JUMPSCARE_CLASS_INDEX} for 'jumpscare' probability.")


def predict_frame_is_jumpscare(frame, threshold):
    """Return True if the model's jumpscare probability for `frame` exceeds `threshold`."""
    # MoviePy's iter_frames() already yields RGB arrays, so no BGR->RGB conversion is needed.
    resized_frame = cv2.resize(frame, (128, 128))
    img_array = np.array(resized_frame) / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    prediction = model.predict(img_array, verbose=0)
    jumpscare_probability = prediction[0][JUMPSCARE_CLASS_INDEX]
    return jumpscare_probability > threshold
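
# Note: model.predict() is called once per sampled frame; batching frames would
# likely be faster, but per-frame calls keep the progress reporting simple.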


def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
    try:
        threshold = sensitivity / 100.0
        analysis_fps = 10  # Frames per second sampled from the video for analysis

        # --- Buffer and gap settings ---
        pre_scare_buffer = 1.0   # Seconds to add before a scare starts
        post_scare_buffer = 1.5  # Seconds to add after a scare ends
        # Maximum time between two detections to be considered the SAME jumpscare event
        MAX_GAP_BETWEEN_DETECTIONS = 2.0
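        # Example (illustrative): with these settings, detections at 10.0s, 10.5s
        # and 14.0s merge into two segments, roughly (9.0s, 12.0s) and (13.0s, 15.5s)
        # once the pre/post buffers are applied.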
logging.info(f"Starting analysis. Sensitivity={sensitivity}, Threshold={threshold}") | |
original_clip = VideoFileClip(video_path) | |
jumpscare_times = [] | |
total_frames = int(original_clip.duration * analysis_fps) | |
progress(0, desc="Analyzing Frames...") | |
for i, frame in enumerate(tqdm(original_clip.iter_frames(fps=analysis_fps), total=total_frames, desc="Analyzing Frames")): | |
current_time = i / analysis_fps | |
progress(i / total_frames, desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s") | |
if predict_frame_is_jumpscare(frame, threshold): | |
jumpscare_times.append(current_time) | |
if not jumpscare_times: | |
raise gr.Error("No jumpscares detected. Try a lower sensitivity value or check the AI model.") | |

        # --- Merge nearby detections into distinct segments ---
        logging.info(f"Found {len(jumpscare_times)} jumpscare frames. Merging into distinct clips.")
        merged_segments = []
        # jumpscare_times is guaranteed non-empty here (checked above), so start the
        # first segment at the first detection.
        start_of_segment = jumpscare_times[0]
        end_of_segment = jumpscare_times[0]
        for i in range(1, len(jumpscare_times)):
            # If the gap to the last detection is too large, it's a new event.
            if jumpscare_times[i] > end_of_segment + MAX_GAP_BETWEEN_DETECTIONS:
                # Finalize the previous segment by adding buffers.
                merged_segments.append((
                    max(0, start_of_segment - pre_scare_buffer),
                    min(original_clip.duration, end_of_segment + post_scare_buffer)
                ))
                # Start a new segment.
                start_of_segment = jumpscare_times[i]
            # Always update the end time of the current segment.
            end_of_segment = jumpscare_times[i]
        # Add the very last segment after the loop finishes.
        merged_segments.append((
            max(0, start_of_segment - pre_scare_buffer),
            min(original_clip.duration, end_of_segment + post_scare_buffer)
        ))

        if not merged_segments:
            raise gr.Error("Could not form any clips from the detected jumpscares.")

        logging.info(f"Created {len(merged_segments)} clips to stitch together.")
        progress(0.9, desc="Stitching clips together...")
        final_clips = [original_clip.subclipped(start, end) for start, end in merged_segments]
        final_video = concatenate_videoclips(final_clips, method="compose")
        output_path = f"jumpscare_compilation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")

        original_clip.close()
        final_video.close()
        return output_path
    except gr.Error:
        # Let Gradio errors (e.g. "No jumpscares detected") reach the UI unchanged.
        raise
    except Exception as e:
        logging.error(f"An error occurred: {e}", exc_info=True)
        raise gr.Error(f"An unexpected error occurred. Check the logs for details. Error: {e}")


# --- Gradio Interface ---
iface = gr.Interface(
    fn=generate_jumpscare_compilation,
    inputs=[
        gr.Video(label="Upload FNAF Video"),
        gr.Slider(minimum=1, maximum=99, step=1, value=80, label="Detection Sensitivity")
    ],
    outputs=gr.Video(label="Jumpscare Compilation"),
    title="AI FNAF Jumpscare Dump Generator"
)
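
# Optional (suggestion): for long videos, enabling Gradio's request queue with
# iface.queue() before launching can help avoid request timeouts during analysis.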

# --- Launch ---
if __name__ == "__main__":
    iface.launch()