szili2011 committed on
Commit
7259665
·
verified ·
1 Parent(s): d8c37f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -40
app.py CHANGED
@@ -1,58 +1,52 @@
1
  import gradio as gr
2
  import cv2
3
  import numpy as np
4
- import tensorflow as tf
5
 
6
- # Load your model
7
- model = tf.keras.models.load_model('model.h5') # No path needed if it's in the same directory
8
 
9
- # Function to resize frames
10
- def resize_frame(frame, size=(64, 64)):
11
- return cv2.resize(frame, size)
12
-
13
- # Function to process each frame
14
  def process_frame(frame):
15
- # Resize the frame
16
- resized_frame = resize_frame(frame)
17
-
18
- # Normalize and prepare the frame for the model
19
- img = resized_frame.astype('float32') / 255.0
20
- img = np.expand_dims(img, axis=0)
21
- img = img.reshape(1, -1) # Flatten to match the input shape
22
-
23
- prediction = model.predict(img)
24
- return prediction[0][1] # Assuming category 1 is jumpscare
25
-
26
- # Function to convert video to dumpscare
27
- def convert_video_to_dumpscare(video_path, sensitivity):
28
- cap = cv2.VideoCapture(video_path)
29
- jumpscare_frames = []
30
-
31
  while cap.isOpened():
32
  ret, frame = cap.read()
33
  if not ret:
34
  break
 
 
35
  prediction = process_frame(frame)
36
- if prediction > sensitivity: # Adjust this threshold as needed
37
- jumpscare_frames.append(frame)
38
 
39
  cap.release()
 
40
 
41
- # Here you can save jumpscare frames or create a new video
42
- return "Dumpscare video created successfully!" # Change this as needed
43
-
44
- # Gradio interface
45
  def gradio_interface(video, sensitivity):
46
  result = convert_video_to_dumpscare(video, sensitivity)
47
  return result
48
 
49
- with gr.Blocks() as demo:
50
- gr.Markdown("## Video Dumpscare Generator")
51
- video_input = gr.Video(label="Upload Video")
52
- sensitivity_input = gr.Slider(minimum=0, maximum=1, label="Sensitivity", value=0.5)
53
- submit_btn = gr.Button("Cut Video")
54
- output_text = gr.Textbox(label="Output")
55
-
56
- submit_btn.click(gradio_interface, inputs=[video_input, sensitivity_input], outputs=output_text)
57
-
58
- demo.launch()
 
 
 
1
  import gradio as gr
2
  import cv2
3
  import numpy as np
4
+ from keras.models import load_model
5
 
6
# Load the pre-trained Keras classifier from the working directory.
model = load_model('model.h5')
8
 
 
 
 
 
 
9
def process_frame(frame):
    """Run the classifier on a single video frame.

    The frame is resized to 128x128, converted BGR->RGB (OpenCV decodes
    frames as BGR; the conversion presumably matches the model's training
    input — confirm against the training pipeline), scaled to [0, 1],
    and batched before being passed to the globally loaded Keras model.

    Returns the raw prediction array from ``model.predict``.
    """
    small = cv2.resize(frame, (128, 128))
    rgb = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)
    # Normalize pixel values and add the leading batch dimension.
    batch = (np.asarray(rgb) / 255.0).reshape(1, 128, 128, 3)
    return model.predict(batch)
18
+
19
def convert_video_to_dumpscare(video, sensitivity):
    """Run the classifier over every decoded frame of *video*.

    Parameters
    ----------
    video : str
        Path to the video file to analyse.
    sensitivity : number
        NOTE(review): currently unused. The previous revision filtered
        frames with ``prediction > sensitivity``; restore that threshold
        here if filtering is still intended.

    Returns
    -------
    list
        One raw model prediction per decoded frame.
    """
    output_frames = []
    cap = cv2.VideoCapture(video)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            output_frames.append(process_frame(frame))
    finally:
        # BUG FIX: release the capture even if process_frame raises,
        # so the underlying file handle is never leaked.
        cap.release()
    return output_frames
36
 
 
 
 
 
37
def gradio_interface(video, sensitivity):
    """Gradio callback: delegate straight to the video conversion routine."""
    return convert_video_to_dumpscare(video, sensitivity)
40
 
41
# Create the Gradio interface.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Video(label="Import Video"),  # video file input
        # BUG FIX: gr.Slider takes `value=`, not `default=` — passing
        # `default` raises TypeError on Gradio 3.x/4.x at startup.
        gr.Slider(label="Sensitivity", minimum=0, maximum=100, step=1, value=50),
    ],
    # The callback currently returns a list of predictions; the "text"
    # output component will render its str() form.
    outputs="text",
)

# Launch the app.
iface.launch()