szili2011 committed
Commit bc76e5e · verified · 1 parent: 67f02fb

Update app.py

Files changed (1): app.py (+49 -57)
app.py CHANGED
@@ -1,133 +1,125 @@
 import cv2
 import numpy as np
 import tensorflow as tf
-from moviepy import VideoFileClip, concatenate_videoclips
+from moviepy.editor import VideoFileClip, concatenate_videoclips
 import gradio as gr
 from tqdm import tqdm
 import os
 import logging
 from datetime import datetime

-# --- IMPORTANT CHANGE: No folders needed ---
-# The code now assumes 'model.h5' is in the same root directory as this app.py file.
+# --- Configuration ---
 MODEL_PATH = 'model.h5'
-
-# --- Setup Basic Logging ---
-# This will print helpful info to the Hugging Face logs for debugging.
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

 # --- Load Model ---
-if not os.path.exists(MODEL_PATH):
-    error_msg = f"Model file not found at '{MODEL_PATH}'. Make sure you have uploaded your 'model.h5' to the root of your Space."
-    logging.error(error_msg)
-    raise FileNotFoundError(error_msg)
-
+tf.get_logger().setLevel('ERROR')
 model = tf.keras.models.load_model(MODEL_PATH)
+tf.get_logger().setLevel('INFO')
 logging.info("AI model loaded successfully.")

-# Based on your training code, LabelBinarizer sorts class names alphabetically.
-# "jumpscare" comes before "normal", so the model's output for the "jumpscare" class
-# will be at index 0. If this is wrong, change this to 1.
 JUMPSCARE_CLASS_INDEX = 0
 logging.info(f"Using class index {JUMPSCARE_CLASS_INDEX} for 'jumpscare' probability.")


 def predict_frame_is_jumpscare(frame, threshold):
-    """Analyzes a single video frame and predicts if it's a jumpscare."""
-    # Preprocess the frame
     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
     resized_frame = cv2.resize(rgb_frame, (128, 128))
     img_array = np.array(resized_frame) / 255.0
     img_array = np.expand_dims(img_array, axis=0)

-    # Get the model's prediction (e.g., [[0.9, 0.1]])
     prediction = model.predict(img_array, verbose=0)
-
-    # Get the specific probability for the 'jumpscare' class
     jumpscare_probability = prediction[0][JUMPSCARE_CLASS_INDEX]

     return jumpscare_probability > threshold


 def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
-    """Analyzes a video, finds jumpscare segments, and creates a compilation."""
     try:
-        # --- Initialization ---
         threshold = sensitivity / 100.0
         analysis_fps = 10
-        pre_scare_buffer = 1.0 # seconds before the scare
-        post_scare_buffer = 1.5 # seconds after the scare
-
-        logging.info(f"Starting analysis for video: {os.path.basename(video_path)}")
-        logging.info(f"Settings: Sensitivity={sensitivity}, Threshold={threshold}")
+
+        # --- Buffers and Gap settings ---
+        pre_scare_buffer = 1.0  # Seconds to add before a scare starts
+        post_scare_buffer = 1.5  # Seconds to add after a scare ends
+        # Maximum time between two detections to be considered the SAME jumpscare event
+        MAX_GAP_BETWEEN_DETECTIONS = 2.0

+        logging.info(f"Starting analysis. Sensitivity={sensitivity}, Threshold={threshold}")
         original_clip = VideoFileClip(video_path)
-
         jumpscare_times = []
         total_frames = int(original_clip.duration * analysis_fps)

-        # --- Frame-by-Frame Analysis ---
         progress(0, desc="Analyzing Frames...")
         for i, frame in enumerate(tqdm(original_clip.iter_frames(fps=analysis_fps), total=total_frames, desc="Analyzing Frames")):
             current_time = i / analysis_fps
             progress(i / total_frames, desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s")
-
             if predict_frame_is_jumpscare(frame, threshold):
                 jumpscare_times.append(current_time)

         if not jumpscare_times:
-            msg = "No jumpscares detected. Try a lower sensitivity value."
-            logging.warning(msg)
-            raise gr.Error(msg)
+            raise gr.Error("No jumpscares detected. Try a lower sensitivity value or check the AI model.")

-        # --- Merge close detections into continuous segments ---
-        logging.info(f"Merging {len(jumpscare_times)} detected frames into clips...")
+        # --- REWRITTEN MERGING LOGIC ---
+        logging.info(f"Found {len(jumpscare_times)} jumpscare frames. Merging into distinct clips.")
         merged_segments = []
         if jumpscare_times:
-            start_time = end_time = jumpscare_times[0]
-            for t in jumpscare_times[1:]:
-                if t <= end_time + post_scare_buffer:
-                    end_time = t
-                else:
-                    merged_segments.append((max(0, start_time - pre_scare_buffer), end_time + post_scare_buffer))
-                    start_time = end_time = t
-            merged_segments.append((max(0, start_time - pre_scare_buffer), end_time + post_scare_buffer))
-
-        # --- Create Final Video ---
+            # Start the first segment
+            start_of_segment = jumpscare_times[0]
+            end_of_segment = jumpscare_times[0]
+
+            for i in range(1, len(jumpscare_times)):
+                # If the gap to the last detection is too large, it's a new event
+                if jumpscare_times[i] > end_of_segment + MAX_GAP_BETWEEN_DETECTIONS:
+                    # Finalize the previous segment by adding buffers
+                    merged_segments.append((
+                        max(0, start_of_segment - pre_scare_buffer),
+                        min(original_clip.duration, end_of_segment + post_scare_buffer)
+                    ))
+                    # Start a new segment
+                    start_of_segment = jumpscare_times[i]
+
+                # Always update the end time of the current segment
+                end_of_segment = jumpscare_times[i]
+
+            # Add the very last segment after the loop finishes
+            merged_segments.append((
+                max(0, start_of_segment - pre_scare_buffer),
+                min(original_clip.duration, end_of_segment + post_scare_buffer)
+            ))
+
+        if not merged_segments:
+            raise gr.Error("Could not form any clips from the detected jumpscares.")
+
+        logging.info(f"Created {len(merged_segments)} clips to stitch together.")
         progress(0.9, desc="Stitching clips together...")
-        final_clips = [original_clip.subclipped(start, min(end, original_clip.duration)) for start, end in merged_segments]

-        final_video = concatenate_videoclips(final_clips, method="compose")
+        final_clips = [original_clip.subclip(start, end) for start, end in merged_segments]

-        # Save the output video to the root with a unique name
-        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        output_path = f"jumpscare_compilation_{timestamp}.mp4"
+        final_video = concatenate_videoclips(final_clips, method="compose")

-        logging.info(f"Writing final video to {output_path}")
+        output_path = f"jumpscare_compilation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
         final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")

         original_clip.close()
         final_video.close()
-
-        logging.info("Process completed successfully.")
         return output_path

     except Exception as e:
         logging.error(f"An error occurred: {e}", exc_info=True)
         raise gr.Error(f"An unexpected error occurred. Check the logs for details. Error: {e}")

-# --- Gradio Interface (Simplified) ---
+# --- Gradio Interface ---
 iface = gr.Interface(
     fn=generate_jumpscare_compilation,
     inputs=[
         gr.Video(label="Upload FNAF Video"),
-        gr.Slider(minimum=1, maximum=99, step=1, value=80, label="Detection Sensitivity",
-                  info="Higher values require more certainty from the AI. Lower values find more, but might have errors.")
+        gr.Slider(minimum=1, maximum=99, step=1, value=80, label="Detection Sensitivity")
     ],
     outputs=gr.Video(label="Jumpscare Compilation"),
-    title="🤖 AI FNAF Jumpscare Dump Generator",
-    description="Upload a video, and the AI will find all jumpscares and compile them. All files are in the root directory."
+    title="AI FNAF Jumpscare Dump Generator"
 )

+# --- Launch ---
 if __name__ == "__main__":
     iface.launch()
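
The core of this commit is the gap-based merging. For a quick sanity check, the loop can be lifted into a standalone helper; the sketch below is illustrative only, not part of the committed app.py: the merge_detections name is invented here, and its defaults simply mirror the committed constants (MAX_GAP_BETWEEN_DETECTIONS = 2.0, pre_scare_buffer = 1.0, post_scare_buffer = 1.5).

def merge_detections(times, max_gap=2.0, pre=1.0, post=1.5, duration=float("inf")):
    # Collapse sorted detection timestamps into buffered (start, end) segments,
    # mirroring the merging loop in generate_jumpscare_compilation.
    if not times:
        return []
    segments = []
    start_of_segment = end_of_segment = times[0]
    for t in times[1:]:
        if t > end_of_segment + max_gap:
            # Gap too large: close the current segment, padded with the buffers
            segments.append((max(0, start_of_segment - pre),
                             min(duration, end_of_segment + post)))
            start_of_segment = t
        end_of_segment = t
    # Close the final segment after the loop
    segments.append((max(0, start_of_segment - pre),
                     min(duration, end_of_segment + post)))
    return segments

print(merge_detections([5.0, 5.1, 5.3, 12.0], duration=60.0))
# [(4.0, 6.8), (11.0, 13.5)]

With the defaults, detections at 5.0 s, 5.1 s and 5.3 s fall within the 2.0 s gap and collapse into one buffered segment, while the detection at 12.0 s arrives more than 2.0 s after the previous one and opens a second segment.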