"""Gradio app: scan an uploaded video for "jumpscare" frames with a Keras model."""

import gradio as gr
import cv2
import numpy as np
import tensorflow as tf

# Trained classifier, loaded once at startup (file expected next to this script).
# NOTE(review): the code below flattens input and reads output index 1 as the
# jumpscare probability — confirm both against the model's training setup.
model = tf.keras.models.load_model('model.h5')

# Spatial size the model was trained on.
FRAME_SIZE = (64, 64)


def resize_frame(frame, size=FRAME_SIZE):
    """Resize a single video frame to the model's expected spatial size."""
    return cv2.resize(frame, size)


def process_frame(frame):
    """Return the model's jumpscare score for one frame.

    The frame is resized, scaled to [0, 1], and flattened to a
    (1, H*W*C) row to match the model's dense input layer.
    """
    resized = resize_frame(frame)
    img = resized.astype('float32') / 255.0
    img = np.expand_dims(img, axis=0)
    img = img.reshape(1, -1)  # flatten to match the model's input shape
    prediction = model.predict(img)
    return prediction[0][1]  # assumes class index 1 is "jumpscare" — TODO confirm


def convert_video_to_dumpscare(video_path, sensitivity):
    """Collect all frames whose jumpscare score exceeds *sensitivity*.

    Args:
        video_path: path to the uploaded video file (may be None/empty).
        sensitivity: score threshold in [0, 1]; higher = fewer frames kept.

    Returns:
        A human-readable status message for the UI.
    """
    if not video_path:
        return "Please upload a video first."

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Could not open the uploaded video."

    jumpscare_frames = []
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if process_frame(frame) > sensitivity:
                jumpscare_frames.append(frame)
    finally:
        # Release the capture even if prediction raises mid-loop.
        cap.release()

    # TODO: actually write jumpscare_frames out as a new video clip.
    return "Dumpscare video created successfully!"


def gradio_interface(video, sensitivity):
    """Gradio callback: run detection and pass the status message through."""
    return convert_video_to_dumpscare(video, sensitivity)


with gr.Blocks() as demo:
    gr.Markdown("## Video Dumpscare Generator")
    video_input = gr.Video(label="Upload Video")
    sensitivity_input = gr.Slider(minimum=0, maximum=1, label="Sensitivity", value=0.5)
    submit_btn = gr.Button("Cut Video")
    output_text = gr.Textbox(label="Output")
    submit_btn.click(
        gradio_interface,
        inputs=[video_input, sensitivity_input],
        outputs=output_text,
    )

if __name__ == "__main__":
    # Guarded so importing this module doesn't start the server.
    demo.launch()