Spaces:
Sleeping
Sleeping
| import cv2 | |
| import numpy as np | |
| import tensorflow as tf | |
| from moviepy import VideoFileClip, concatenate_videoclips | |
| import gradio as gr | |
| from tqdm import tqdm | |
| import os | |
| import logging | |
| from datetime import datetime | |
# --- IMPORTANT CHANGE: No folders needed ---
# The code now assumes 'model.h5' is in the same root directory as this app.py file.
MODEL_PATH = 'model.h5'

# --- Setup Basic Logging ---
# This will print helpful info to the Hugging Face logs for debugging.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Load Model ---
# Fail fast with an actionable message if the weights file is missing;
# tf.keras.models.load_model would otherwise raise a less obvious error.
if not os.path.exists(MODEL_PATH):
    error_msg = f"Model file not found at '{MODEL_PATH}'. Make sure you have uploaded your 'model.h5' to the root of your Space."
    logging.error(error_msg)
    raise FileNotFoundError(error_msg)

# Loaded once at import time so every request reuses the same model instance.
model = tf.keras.models.load_model(MODEL_PATH)
logging.info("AI model loaded successfully.")

# Based on your training code, LabelBinarizer sorts class names alphabetically.
# "jumpscare" comes before "normal", so the model's output for the "jumpscare" class
# will be at index 0. If this is wrong, change this to 1.
JUMPSCARE_CLASS_INDEX = 0
logging.info(f"Using class index {JUMPSCARE_CLASS_INDEX} for 'jumpscare' probability.")
def predict_frame_is_jumpscare(frame, threshold):
    """Return True if the model's 'jumpscare' probability for *frame* exceeds *threshold*.

    Args:
        frame: An RGB image array of shape (H, W, 3), as yielded by
            moviepy's ``iter_frames`` (the only caller in this file) —
            NOT a BGR frame from ``cv2.VideoCapture``.
        threshold: Probability cutoff in [0, 1].

    Returns:
        bool: True when the jumpscare-class probability is above the threshold.
    """
    # BUGFIX: moviepy already yields RGB frames, so the previous
    # cv2.COLOR_BGR2RGB conversion silently swapped the red and blue
    # channels before inference. Feed the frame to the model as-is.
    # (If the model was trained on BGR images, reinstate the swap.)
    resized_frame = cv2.resize(frame, (128, 128))
    # Scale pixels to [0, 1] and add the batch dimension the model expects.
    img_array = np.expand_dims(np.asarray(resized_frame) / 255.0, axis=0)
    # prediction shape: (1, num_classes), e.g. [[0.9, 0.1]]
    prediction = model.predict(img_array, verbose=0)
    jumpscare_probability = prediction[0][JUMPSCARE_CLASS_INDEX]
    # bool() so callers get a plain Python bool rather than np.bool_.
    return bool(jumpscare_probability > threshold)
def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
    """Analyze a video, find jumpscare segments, and write a compilation clip.

    Args:
        video_path: Path to the uploaded video file.
        sensitivity: Slider value 1-99; divided by 100 to get the
            probability threshold passed to the classifier.
        progress: Gradio progress tracker (injected by the UI).

    Returns:
        str: Path of the rendered compilation video.

    Raises:
        gr.Error: If no jumpscares are detected, or on any unexpected failure.
    """
    original_clip = None
    final_video = None
    try:
        # --- Initialization ---
        threshold = sensitivity / 100.0
        analysis_fps = 10            # frames per second sampled for inference
        pre_scare_buffer = 1.0       # seconds kept before each scare
        post_scare_buffer = 1.5      # seconds kept after each scare

        logging.info(f"Starting analysis for video: {os.path.basename(video_path)}")
        logging.info(f"Settings: Sensitivity={sensitivity}, Threshold={threshold}")

        original_clip = VideoFileClip(video_path)
        jumpscare_times = []
        # Guard against near-zero duration clips: total_frames is used as a
        # divisor below, so keep it >= 1 to avoid ZeroDivisionError.
        total_frames = max(1, int(original_clip.duration * analysis_fps))

        # --- Frame-by-Frame Analysis ---
        progress(0, desc="Analyzing Frames...")
        for i, frame in enumerate(tqdm(original_clip.iter_frames(fps=analysis_fps),
                                       total=total_frames, desc="Analyzing Frames")):
            current_time = i / analysis_fps
            # Clamp so a slight frame-count overshoot never reports > 100%.
            progress(min(i / total_frames, 1.0),
                     desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s")
            if predict_frame_is_jumpscare(frame, threshold):
                jumpscare_times.append(current_time)

        if not jumpscare_times:
            msg = "No jumpscares detected. Try a lower sensitivity value."
            logging.warning(msg)
            raise gr.Error(msg)

        # --- Merge close detections into continuous segments ---
        logging.info(f"Merging {len(jumpscare_times)} detected frames into clips...")
        merged_segments = []
        start_time = end_time = jumpscare_times[0]
        for t in jumpscare_times[1:]:
            if t <= end_time + post_scare_buffer:
                # Close enough to the current segment: extend it.
                end_time = t
            else:
                # Gap too large: flush the current segment (with padding) and start anew.
                merged_segments.append((max(0, start_time - pre_scare_buffer),
                                        end_time + post_scare_buffer))
                start_time = end_time = t
        merged_segments.append((max(0, start_time - pre_scare_buffer),
                                end_time + post_scare_buffer))

        # --- Create Final Video ---
        progress(0.9, desc="Stitching clips together...")
        # BUGFIX: 'from moviepy import ...' is the moviepy >= 2.0 API, where
        # Clip.subclip was renamed to Clip.subclipped; the old name raises
        # AttributeError. End times are clamped to the clip duration.
        final_clips = [original_clip.subclipped(start, min(end, original_clip.duration))
                       for start, end in merged_segments]
        final_video = concatenate_videoclips(final_clips, method="compose")

        # Save the output video to the root with a unique name.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_path = f"jumpscare_compilation_{timestamp}.mp4"
        logging.info(f"Writing final video to {output_path}")
        final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")

        logging.info("Process completed successfully.")
        return output_path
    except gr.Error:
        # BUGFIX: user-facing errors (e.g. "No jumpscares detected") were
        # previously caught by the generic handler below and re-wrapped as
        # "unexpected", losing their message. Let them propagate unchanged.
        raise
    except Exception as e:
        logging.error(f"An error occurred: {e}", exc_info=True)
        raise gr.Error(f"An unexpected error occurred. Check the logs for details. Error: {e}")
    finally:
        # Release file handles even on failure paths (previously leaked on error).
        if original_clip is not None:
            original_clip.close()
        if final_video is not None:
            final_video.close()
# --- Gradio Interface (Simplified) ---
iface = gr.Interface(
    fn=generate_jumpscare_compilation,
    inputs=[
        gr.Video(label="Upload FNAF Video"),
        # Slider value is divided by 100 inside the handler, so 1-99
        # maps to a probability threshold of 0.01-0.99.
        gr.Slider(minimum=1, maximum=99, step=1, value=80, label="Detection Sensitivity",
                  info="Higher values require more certainty from the AI. Lower values find more, but might have errors.")
    ],
    outputs=gr.Video(label="Jumpscare Compilation"),
    title="🤖 AI FNAF Jumpscare Dump Generator",
    description="Upload a video, and the AI will find all jumpscares and compile them. All files are in the root directory.",
    allow_flagging="never"
)

# Launch the web UI only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()