szili2011 committed · verified
Commit e736c4a · 1 Parent(s): b7ad093

Update app.py

Files changed (1):
  1. app.py +34 -24
app.py CHANGED
@@ -1,20 +1,27 @@
-import gradio as gr
 import cv2
 import numpy as np
 from keras.models import load_model
+from moviepy.editor import VideoFileClip
+import gradio as gr
 
-# Load the pre-trained model
+# Load the AI model
 model = load_model('model.h5')
 
 def process_frame(frame):
     # Resize the image to (128, 128)
-    resized_frame = cv2.resize(frame, (128, 128))  # Resize to 128x128
-    resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)  # Convert to RGB
-    img_array = np.array(resized_frame) / 255.0  # Normalize pixel values
-    img_array = img_array.reshape(1, 128, 128, 3)  # Reshape for model input
+    resized_frame = cv2.resize(frame, (128, 128))
+    resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)
+    img_array = np.array(resized_frame) / 255.0
+    img_array = img_array.reshape(1, 128, 128, 3)
 
     prediction = model.predict(img_array)  # Get prediction
-    return prediction
+
+    # Example logic to modify frame based on prediction
+    if prediction[0][0] > 0.5:  # Condition based on prediction
+        return frame  # Keep the original frame if the condition is met
+    else:
+        # Example modification: convert to grayscale
+        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 
 def convert_video_to_dumpscare(video, sensitivity):
     output_frames = []
@@ -25,8 +32,7 @@ def convert_video_to_dumpscare(video, sensitivity):
     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
-    # Define the codec and create a VideoWriter object
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Ensure this codec is supported
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     out = cv2.VideoWriter('output_dumpscare.mp4', fourcc, fps, (width, height))
 
     while cap.isOpened():
@@ -34,30 +40,34 @@ def convert_video_to_dumpscare(video, sensitivity):
         if not ret:
             break
 
-        # Process each frame
-        prediction = process_frame(frame)  # You can modify frame based on prediction if needed
-
-        # Write the original frame to the output video
-        out.write(frame)
+        modified_frame = process_frame(frame)  # Get modified frame
+        out.write(modified_frame)  # Write the modified frame to the output video
 
     cap.release()
     out.release()
-
-    return 'output_dumpscare.mp4'  # Return the path to the saved video
 
+    # Add audio from the original video
+    original_video = VideoFileClip(video)
+    output_video = VideoFileClip('output_dumpscare.mp4')
+    final_video = output_video.set_audio(original_video.audio)
+    final_video.write_videofile('final_output_dumpscare.mp4', codec='libx264')
+
+    return 'final_output_dumpscare.mp4'
+
+# Gradio Interface
 def gradio_interface(video, sensitivity):
-    result = convert_video_to_dumpscare(video, sensitivity)
-    return result
+    output_video = convert_video_to_dumpscare(video, sensitivity)
+    return output_video
 
-# Create the Gradio interface
+# Create Gradio interface
 iface = gr.Interface(
     fn=gradio_interface,
     inputs=[
-        gr.Video(label="Import Video"),
-        gr.Slider(label="Sensitivity", minimum=0, maximum=100, step=1, value=50)
+        gr.Video(label="Input Video"),
+        gr.Slider(label="Sensitivity", minimum=0, maximum=100, step=1)
     ],
-    outputs=gr.File(label="Output Video")
+    outputs=gr.Video(label="Output Video")
 )
 
-# Launch the app
-iface.launch()
+if __name__ == "__main__":
+    iface.launch()
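Reviewer note (not part of the committed diff): the new write loop hands process_frame()'s return value straight to out.write(), but the else branch returns a single-channel grayscale image, while the VideoWriter above is opened in its default color mode (isColor=True) and expects 3-channel BGR frames at the declared size; several OpenCV backends silently drop mismatched frames. A minimal guard sketch, assuming the writer stays in color mode (ensure_bgr is a hypothetical helper, not from this commit):

import cv2
import numpy as np

def ensure_bgr(frame: np.ndarray) -> np.ndarray:
    # Re-expand a single-channel frame to 3-channel BGR so that a color
    # VideoWriter (isColor=True, the default) accepts it.
    if frame.ndim == 2:
        return cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
    return frame

# Usage inside the frame loop:
#   out.write(ensure_bgr(modified_frame))

Separately, the sensitivity argument is still unused inside convert_video_to_dumpscare, so the slider has no effect on the output yet.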