import cv2
import gradio as gr
import numpy as np
from keras.models import load_model

# Load the pre-trained classifier once at import time so every request reuses it.
model = load_model('model.h5')


def process_frame(frame):
    """Run the model on a single BGR video frame.

    Args:
        frame: BGR image array as read by OpenCV (any size).

    Returns:
        The raw model prediction for the frame.
    """
    # Model expects 128x128 RGB input with pixel values normalized to [0, 1].
    resized = cv2.resize(frame, (128, 128))
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    img_array = np.asarray(rgb, dtype=np.float32) / 255.0
    # Add the batch dimension: (1, 128, 128, 3).
    img_array = img_array.reshape(1, 128, 128, 3)
    return model.predict(img_array)


def convert_video_to_dumpscare(video, sensitivity):
    """Process every frame of *video* and write the result to a new file.

    Args:
        video: Path to the input video file.
        sensitivity: Slider value in [0, 100]. Currently unused; kept in the
            signature as the intended knob for the frame-modification step.

    Returns:
        Path to the written output video ('output_dumpscare.mp4').

    Raises:
        ValueError: If the input video cannot be opened.
    """
    cap = cv2.VideoCapture(video)
    if not cap.isOpened():
        # Previously this failed silently and produced an empty output file.
        raise ValueError(f"Could not open video: {video}")

    # Fall back to a sane default if the container has no FPS metadata
    # (cap.get returns 0 in that case, which would break the writer).
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('output_dumpscare.mp4', fourcc, fps, (width, height))

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Prediction is computed per frame; combine it with `sensitivity`
            # here to modify the frame once the dumpscare effect is implemented.
            prediction = process_frame(frame)
            # NOTE: frames are streamed straight to the writer instead of being
            # buffered in a list — the old `output_frames` list grew without
            # bound and was never read.
            out.write(frame)
    finally:
        # Always release capture/writer handles, even if processing fails.
        cap.release()
        out.release()

    return 'output_dumpscare.mp4'


def gradio_interface(video, sensitivity):
    """Gradio callback: delegate to the video converter."""
    return convert_video_to_dumpscare(video, sensitivity)


# Build the web UI: a video upload plus a sensitivity slider.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Video(label="Import Video"),
        gr.Slider(label="Sensitivity", minimum=0, maximum=100, step=1, value=50),
    ],
    outputs=gr.File(label="Output Video"),  # Output will be a video file
)

# Guard the launch so importing this module (e.g. for testing) does not
# start the web server; running it as a script behaves exactly as before.
if __name__ == "__main__":
    iface.launch()