import cv2
import numpy as np
import tensorflow as tf
from moviepy import VideoFileClip, concatenate_videoclips
import gradio as gr
from tqdm import tqdm
import logging
from datetime import datetime

# --- Configuration ---
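# The model is expected to be a Keras image classifier that takes 128x128 RGB
# frames scaled to [0, 1] and outputs per-class probabilities
# (see predict_frame_is_jumpscare below).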
MODEL_PATH = 'model.h5'
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# --- Load Model ---
tf.get_logger().setLevel('ERROR')
model = tf.keras.models.load_model(MODEL_PATH)
tf.get_logger().setLevel('INFO')
logging.info("AI model loaded successfully.")
JUMPSCARE_CLASS_INDEX = 0
logging.info(f"Using class index {JUMPSCARE_CLASS_INDEX} for 'jumpscare' probability.")

def predict_frame_is_jumpscare(frame, threshold):
    """Return True if the model's jumpscare probability for `frame` exceeds `threshold`."""
    # MoviePy's iter_frames() already yields frames in RGB order, so no BGR->RGB
    # conversion is needed (this assumes the model was trained on RGB images).
    resized_frame = cv2.resize(frame, (128, 128))
    img_array = resized_frame / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    prediction = model.predict(img_array, verbose=0)
    jumpscare_probability = prediction[0][JUMPSCARE_CLASS_INDEX]
    return jumpscare_probability > threshold
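
# NOTE: Calling model.predict() once per frame keeps the code simple but is slow;
# batching several frames into a single predict() call would cut per-call overhead.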

def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
    try:
        threshold = sensitivity / 100.0
        analysis_fps = 10  # Sample the video at 10 frames per second for analysis

        # --- Buffers and gap settings ---
        pre_scare_buffer = 1.0   # Seconds to add before a scare starts
        post_scare_buffer = 1.5  # Seconds to add after a scare ends
        # Maximum time between two detections to be considered the SAME jumpscare event
        MAX_GAP_BETWEEN_DETECTIONS = 2.0

        logging.info(f"Starting analysis. Sensitivity={sensitivity}, Threshold={threshold}")
        original_clip = VideoFileClip(video_path)
        jumpscare_times = []
        total_frames = int(original_clip.duration * analysis_fps)

        progress(0, desc="Analyzing Frames...")
        for i, frame in enumerate(tqdm(original_clip.iter_frames(fps=analysis_fps),
                                       total=total_frames, desc="Analyzing Frames")):
            current_time = i / analysis_fps
            progress(i / total_frames, desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s")
            if predict_frame_is_jumpscare(frame, threshold):
                jumpscare_times.append(current_time)

        if not jumpscare_times:
            raise gr.Error("No jumpscares detected. Try a lower sensitivity value or check the AI model.")

        # --- Merging logic: group nearby detections into distinct clips ---
        logging.info(f"Found {len(jumpscare_times)} jumpscare frames. Merging into distinct clips.")
        merged_segments = []

        # Start the first segment (jumpscare_times is guaranteed non-empty here)
        start_of_segment = jumpscare_times[0]
        end_of_segment = jumpscare_times[0]
        for i in range(1, len(jumpscare_times)):
            # If the gap to the last detection is too large, it's a new event
            if jumpscare_times[i] > end_of_segment + MAX_GAP_BETWEEN_DETECTIONS:
                # Finalize the previous segment by adding buffers
                merged_segments.append((
                    max(0, start_of_segment - pre_scare_buffer),
                    min(original_clip.duration, end_of_segment + post_scare_buffer)
                ))
                # Start a new segment
                start_of_segment = jumpscare_times[i]
            # Always update the end time of the current segment
            end_of_segment = jumpscare_times[i]

        # Add the very last segment after the loop finishes
        merged_segments.append((
            max(0, start_of_segment - pre_scare_buffer),
            min(original_clip.duration, end_of_segment + post_scare_buffer)
        ))
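        # Example: with MAX_GAP_BETWEEN_DETECTIONS = 2.0 and detections at
        # [10.0, 10.1, 10.2, 30.0], the first three merge into one segment and the
        # fourth starts a new one, giving (9.0, 11.7) and (29.0, 31.5) after buffers.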
        if not merged_segments:
            raise gr.Error("Could not form any clips from the detected jumpscares.")

        logging.info(f"Created {len(merged_segments)} clips to stitch together.")
        progress(0.9, desc="Stitching clips together...")
        final_clips = [original_clip.subclipped(start, end) for start, end in merged_segments]
        # "compose" concatenates safely even if clip sizes were to differ
        final_video = concatenate_videoclips(final_clips, method="compose")
        output_path = f"jumpscare_compilation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")
        original_clip.close()
        final_video.close()
        return output_path
    except gr.Error:
        # Re-raise user-facing errors (e.g. "No jumpscares detected") without rewrapping them
        raise
    except Exception as e:
        logging.error(f"An error occurred: {e}", exc_info=True)
        raise gr.Error(f"An unexpected error occurred. Check the logs for details. Error: {e}")

# --- Gradio Interface ---
iface = gr.Interface(
    fn=generate_jumpscare_compilation,
    inputs=[
        gr.Video(label="Upload FNAF Video"),
        gr.Slider(minimum=1, maximum=99, step=1, value=80, label="Detection Sensitivity"),
    ],
    outputs=gr.Video(label="Jumpscare Compilation"),
    title="AI FNAF Jumpscare Dump Generator",
)
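# Note: the sensitivity slider maps directly to the probability threshold
# (value / 100), so higher values make detection stricter and produce fewer clips.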

# --- Launch ---
if __name__ == "__main__":
    iface.launch()
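    # When running locally, iface.launch(share=True) creates a temporary public
    # link; on Hugging Face Spaces, the default launch() is all that's needed.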