Update app.py
app.py CHANGED
@@ -1,73 +1,150 @@
import cv2
import numpy as np
- from moviepy.editor import VideoFileClip
import gradio as gr

- #
- img_array = np.array(resized_frame) / 255.0
- img_array = img_array.reshape(1, 128, 128, 3)
- #
- output_video = VideoFileClip('output_dumpscare.mp4')
- final_video = output_video.set_audio(original_video.audio)
- final_video.write_videofile('final_output_dumpscare.mp4', codec='libx264')
- # Gradio Interface
- def gradio_interface(video, sensitivity):
-     output_video = convert_video_to_dumpscare(video, sensitivity)
-     return output_video
- #
iface = gr.Interface(
-     fn=
    inputs=[
-         gr.Video(label="
-         gr.Slider(
    ],
-     outputs=gr.Video(label="
)

if __name__ == "__main__":
-     iface.launch()
import cv2
import numpy as np
+ import tensorflow as tf
+ from moviepy.editor import VideoFileClip, concatenate_videoclips
import gradio as gr
+ from tqdm import tqdm
+ import os

+ # --- Model Loading ---
+ # Ensure you have a 'model.h5' file in the same directory.
+ # This model should be trained to output a value close to 1 for a jumpscare frame
+ # and close to 0 for a non-jumpscare frame.
+ MODEL_PATH = 'model.h5'
+ if not os.path.exists(MODEL_PATH):
+     raise FileNotFoundError(f"Model file not found at {MODEL_PATH}. Please ensure a trained model exists.")
+ model = tf.keras.models.load_model(MODEL_PATH)

+
+ def predict_frame_is_jumpscare(frame, threshold):
+     """
+     Analyzes a single video frame and predicts if it's a jumpscare.

+     Args:
+         frame (np.array): A single RGB video frame, as yielded by moviepy's iter_frames().
+         threshold (float): The sensitivity threshold (0.0 to 1.0) for the prediction.

+     Returns:
+         bool: True if the frame is predicted as a jumpscare, False otherwise.
+     """
+     # 1. Preprocess the frame for the model
+     # Note: moviepy's iter_frames() already yields RGB frames, so no BGR-to-RGB
+     # conversion is needed here (cv2.cvtColor would only be required for frames
+     # read with cv2.VideoCapture, which returns BGR).
+     # Resize to the model's expected input size (e.g., 128x128)
+     resized_frame = cv2.resize(frame, (128, 128))
+     # Normalize pixel values to be between 0 and 1
+     img_array = np.array(resized_frame) / 255.0
+     # Add a batch dimension (model expects shape: 1, height, width, channels)
+     img_array = np.expand_dims(img_array, axis=0)

+     # 2. Make a prediction
+     prediction = model.predict(img_array, verbose=0)  # verbose=0 silences Keras's per-call logging

+     # 3. Return True/False based on the prediction and threshold
+     # We assume the model's output is a single value in prediction[0][0]
+     return prediction[0][0] > threshold
+
+
+ def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
+     """
+     Analyzes a video, finds jumpscare segments, and creates a compilation.

+     Args:
+         video_path (str): The file path to the input video.
+         sensitivity (int): An integer from 0 to 100 for detection sensitivity.
+         progress (gr.Progress): Gradio progress tracker.

+     Returns:
+         str: The file path to the output video. Raises gr.Error if no
+             jumpscares are found.
+     """
+     # Convert sensitivity (0-100) to a model threshold (0.0-1.0)
+     threshold = sensitivity / 100.0
+
+     # Use a lower FPS for analysis to speed things up. Jumpscares usually last
+     # for several frames, so we don't need to check every single one.
+     analysis_fps = 10
+
+     print("Loading video file...")
+     original_clip = VideoFileClip(video_path)
+
+     jumpscare_segments = []
+     is_in_jumpscare_segment = False
+     segment_start_time = 0
+
+     print(f"Analyzing video for jumpscares with threshold {threshold}...")
+
+     # Use tqdm for a console progress bar and gr.Progress for the UI
+     total_frames = int(original_clip.duration * analysis_fps)
+     frame_iterator = original_clip.iter_frames(fps=analysis_fps)
+
+     for i, frame in enumerate(tqdm(frame_iterator, total=total_frames, desc="Analyzing Frames")):
+         current_time = i / analysis_fps
+         progress(i / total_frames, desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s")
+
+         is_jumpscare = predict_frame_is_jumpscare(frame, threshold)

+         # --- State machine to find continuous jumpscare segments ---
+         if is_jumpscare and not is_in_jumpscare_segment:
+             # Start of a new jumpscare segment
+             is_in_jumpscare_segment = True
+             segment_start_time = current_time
+
+         elif not is_jumpscare and is_in_jumpscare_segment:
+             # End of the current jumpscare segment
+             is_in_jumpscare_segment = False
+             # Add a small buffer to the end time to capture the full scare,
+             # clamped so the segment never runs past the end of the clip
+             segment_end_time = min(current_time + 0.5, original_clip.duration)
+             jumpscare_segments.append((segment_start_time, segment_end_time))
+
+     # If the video ends while in a jumpscare, close the last segment
+     if is_in_jumpscare_segment:
+         jumpscare_segments.append((segment_start_time, original_clip.duration))

+     # --- Create the final video ---
+     if not jumpscare_segments:
+         print("No jumpscares found.")
+         # Returning None here would cause an error in Gradio's video output, so
+         # raise a gr.Error, which Gradio displays to the user. (A gentler option
+         # would be to report this through a separate gr.Text component.)
+         raise gr.Error("No jumpscares were detected with the current sensitivity setting. Try a lower value.")

+     print(f"Found {len(jumpscare_segments)} jumpscare segments. Creating compilation...")
+     progress(0.9, desc="Stitching clips together...")

+     # Create subclips from the original video using the detected timestamps
+     final_clips = [original_clip.subclip(start, end) for start, end in jumpscare_segments]
+
+     # Concatenate all the jumpscare clips into one video
+     final_video = concatenate_videoclips(final_clips)
+
+     output_path = "fnaf_jumpscare_compilation.mp4"
+     # Write the final video file, including audio
+     final_video.write_videofile(output_path, codec="libx264", audio_codec="aac", temp_audiofile='temp-audio.m4a', remove_temp=True)
+
+     # Close the clips to free up resources
+     original_clip.close()
+     for clip in final_clips:
+         clip.close()
+
+     print(f"Compilation saved to {output_path}")
+     return output_path


+ # --- Gradio Interface ---
+ # Create a Gradio interface for the function
iface = gr.Interface(
+     fn=generate_jumpscare_compilation,
    inputs=[
+         gr.Video(label="Upload FNAF Video"),
+         gr.Slider(minimum=1, maximum=100, step=1, value=70, label="Detection Sensitivity",
+                   info="Higher values mean the AI needs to be more certain it's a jumpscare. Lower values will detect more, but might have false positives.")
    ],
+     outputs=gr.Video(label="Jumpscare Compilation"),
+     title="AI FNAF Jumpscare Dump Generator",
+     description="Upload a video, and this tool will use an AI model to find all the jumpscares and compile them into a single video. This requires a `model.h5` file.",
+     allow_flagging="never"
)

if __name__ == "__main__":
+     iface.launch()
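
The app expects a pre-trained model.h5 in the Space's working directory, but the commit does not include one. For reference, below is a minimal sketch of a Keras model that satisfies the contract app.py relies on: a (128, 128, 3) normalized-RGB input and a single sigmoid output that is near 1 for jumpscare frames. The architecture, layer sizes, and training call are illustrative assumptions, not the model actually used by this Space.

# Hypothetical sketch only: any Keras model with a (128, 128, 3) input
# and a single sigmoid output would satisfy app.py's expectations.
import tensorflow as tf

def build_jumpscare_classifier():
    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(128, 128, 3)),      # normalized RGB frames
        tf.keras.layers.Conv2D(16, 3, activation="relu"),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(32, 3, activation="relu"),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(64, 3, activation="relu"),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(64, activation="relu"),
        tf.keras.layers.Dense(1, activation="sigmoid"),  # ~1 = jumpscare, ~0 = not
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model

if __name__ == "__main__":
    model = build_jumpscare_classifier()
    # After training on labeled frames (X: (N, 128, 128, 3) floats in [0, 1],
    # y: (N,) 0/1 labels), e.g. model.fit(X, y, epochs=10, validation_split=0.2),
    # save it where app.py expects to find it:
    model.save("model.h5")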
|
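One performance note on the new code: predict_frame_is_jumpscare calls model.predict once per sampled frame, so the per-call Keras overhead is paid roughly duration * analysis_fps times. If analysis is too slow even at 10 FPS, batching the preprocessed frames before predicting is a common remedy. A rough sketch follows; the helper name and batch size are hypothetical and not part of the commit.

import numpy as np

def predict_frames_batched(model, frames, threshold, batch_size=64):
    """Classify an iterable of preprocessed frames in batches.

    Assumes each frame is a (128, 128, 3) array already normalized to [0, 1];
    returns a list of booleans, one per frame, in input order.
    """
    results = []
    batch = []
    for frame in frames:
        batch.append(frame)
        if len(batch) == batch_size:
            preds = model.predict(np.stack(batch), verbose=0)
            results.extend(bool(p[0] > threshold) for p in preds)
            batch = []
    if batch:  # flush the final partial batch
        preds = model.predict(np.stack(batch), verbose=0)
        results.extend(bool(p[0] > threshold) for p in preds)
    return results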