Init
- .gitignore +4 -1
- app.py +59 -0
- helpers.py +113 -0
- requirements.txt +6 -0
.gitignore
CHANGED
@@ -157,4 +157,7 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
-
+.idea/
+
+# TensorFlow Binaries
+*.keras
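The new entries keep JetBrains project metadata (.idea/) and saved Keras model binaries (*.keras, presumably the MesonetRNN.keras weights that helpers.py loads at prediction time) out of version control.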
app.py
ADDED
@@ -0,0 +1,59 @@
import gradio as gr
from helpers import load_video_from_url, detect_deepfake

theme = gr.themes.Default(
    primary_hue="stone",
    secondary_hue="blue",
    neutral_hue="zinc",
    spacing_size="md",
    text_size="md",
    font=[gr.themes.GoogleFont("IBM Plex Mono"), "system-ui"]
)

with gr.Blocks(theme=theme) as demo:
    # DEFINE COMPONENTS

    # Text box for entering a YouTube URL
    urlInput = gr.Textbox(
        label="YOUTUBE VIDEO URL",
        value="https://www.youtube.com/watch?v=BmrUJhY9teE"
    )

    # Button for downloading the video and previewing sample frames
    loadVideoBtn = gr.Button("Load Video")

    # Text box for displaying the video title
    videoTitle = gr.Textbox(
        label="VIDEO TITLE",
        lines=1,
        interactive=False
    )

    # Image gallery for previewing sample frames
    sampleFrames = gr.Gallery(
        label="SAMPLE FRAMES",
        elem_id="gallery",
        columns=[3],
        rows=[1],
        object_fit="contain",
        height="auto"
    )

    # Button for generating the video prediction (hidden until a video is loaded)
    predVideoBtn = gr.Button(value="Classify Video", visible=False)

    # Label for displaying the prediction (hidden until a video is loaded)
    predOutput = gr.Label(
        label="DETECTED LABEL (AND CONFIDENCE LEVEL)",
        num_top_classes=2,
        visible=False
    )

    # DEFINE EVENT HANDLERS

    # Load the video from the URL, display sample frames, and reveal the prediction components
    loadVideoBtn.click(fn=load_video_from_url, inputs=[urlInput], outputs=[videoTitle, sampleFrames, predVideoBtn, predOutput])

    # Generate the video prediction
    predVideoBtn.click(fn=detect_deepfake, outputs=[predOutput])

demo.launch()
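predVideoBtn and predOutput start hidden and are revealed when load_video_from_url returns freshly constructed components with visible=True (see helpers.py below). A minimal alternative sketch, assuming Gradio 4.x where gr.update() is still supported, patches the existing components in place rather than replacing them; reveal_prediction_components is a hypothetical name standing in for the tail of load_video_from_url:

import gradio as gr

# Hypothetical variant of the end of load_video_from_url: return gr.update()
# property patches for predVideoBtn and predOutput, in the same output order
# as the click() wiring above, instead of constructing new components.
def reveal_prediction_components(title, example_frames):
    return title, example_frames, gr.update(visible=True), gr.update(visible=True)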
helpers.py
ADDED
@@ -0,0 +1,113 @@
import tensorflow as tf
import numpy as np
import cv2
import os
import gradio as gr
from keras.models import load_model
from pytube import YouTube
import pickle


def load_video_from_url(youtube_url):
    # DOWNLOAD THE VIDEO USING THE GIVEN URL
    yt = YouTube(youtube_url)
    yt_stream = yt.streams.filter(file_extension='mp4').first()
    title = yt_stream.title
    src = yt_stream.download()
    capture = cv2.VideoCapture(src)

    # SAMPLE FRAMES FROM THE VIDEO FILE
    sampled_frames = sample_frames_from_video_file(capture)

    # PICK EXAMPLE FRAMES AT THE QUARTER, HALF, AND THREE-QUARTER POINTS
    example_frames = [
        sampled_frames[len(sampled_frames) // 4],
        sampled_frames[len(sampled_frames) // 2],
        sampled_frames[3 * len(sampled_frames) // 4],
    ]

    # DELETE THE DOWNLOADED VIDEO FILE
    if os.path.exists(src):
        os.remove(src)

    # CONVERT THE SAMPLED FRAMES TO A TENSOR WITH A LEADING BATCH DIMENSION
    frames_tensor = tf.expand_dims(tf.convert_to_tensor(sampled_frames, dtype=tf.float32), axis=0)

    # SAVE THE TENSOR TO FILE FOR THE PREDICTION STEP
    pickle.dump(frames_tensor, open("frames_tf.pkl", "wb"))

    # Define visible prediction components to show once the video is loaded
    predVideoBtn = gr.Button(value="Classify Video", visible=True)

    predOutput = gr.Label(
        label="DETECTED LABEL (AND CONFIDENCE LEVEL)",
        num_top_classes=2,
        visible=True
    )

    return title, example_frames, predVideoBtn, predOutput


def detect_deepfake():
    # LOAD THE SAVED FRAMES
    frames_tf = pickle.load(open("frames_tf.pkl", "rb"))

    # DELETE THE FRAMES FILE
    if os.path.exists("frames_tf.pkl"):
        os.remove("frames_tf.pkl")

    # LOAD THE RNN MODEL FROM DISK
    loaded_model = load_model("MesonetRNN.keras")
    # loaded_model.summary()

    # GET THE PREDICTION: THE MODEL OUTPUTS THE "REAL" PROBABILITY
    out = loaded_model.predict(frames_tf)
    real_confidence = out[0][0]
    fake_confidence = 1 - real_confidence
    confidence_dict = {"FAKE": fake_confidence, "REAL": real_confidence}

    # RETURN THE LABEL-TO-CONFIDENCE MAPPING
    return confidence_dict


def sample_frames_from_video_file(capture, sample_count=10, frames_per_sample=10, frame_step=10,
                                  output_size=(256, 256)):
    # Read the video frame by frame
    result = []

    video_length = capture.get(cv2.CAP_PROP_FRAME_COUNT)

    # Number of source frames each sample spans
    need_length = 1 + (frames_per_sample - 1) * frame_step

    max_start = video_length - need_length

    # Spread the sample start positions evenly across the video
    sample_starts = []
    for sample in range(sample_count):
        sample_start = int(max_start * sample / sample_count)
        sample_starts.append(sample_start)
        # print(sample_start)

    for start in sample_starts:
        capture.set(cv2.CAP_PROP_POS_FRAMES, start)
        # ret is a boolean indicating whether the read was successful; frame is the image itself
        ret, frame = capture.read()
        result.append(format_frames(frame, output_size))

        for _ in range(frames_per_sample - 1):
            for _ in range(frame_step):
                ret, frame = capture.read()
            if ret:
                frame = format_frames(frame, output_size)
                result.append(frame)
            else:
                # Pad with black frames if the video ran out
                result.append(np.zeros_like(result[0]))
    capture.release()

    return np.array(result)


def format_frames(frame, output_size):
    # Scale pixel values to [0, 1] and letterbox-resize to the target size
    frame = tf.image.convert_image_dtype(frame, tf.float32)
    frame = tf.image.resize_with_pad(frame, *output_size)
    return frame
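With the defaults, sample_frames_from_video_file collects sample_count × frames_per_sample = 10 × 10 = 100 frames, and each sample spans need_length = 1 + (10 - 1) × 10 = 91 source frames, so the ten start positions are spread evenly across the first video_length - 91 frames. A minimal local smoke test, assuming a hypothetical clip test.mp4 in the working directory:

import cv2
from helpers import sample_frames_from_video_file

# test.mp4 is a placeholder; any clip comfortably longer than 91 frames works.
capture = cv2.VideoCapture("test.mp4")
frames = sample_frames_from_video_file(capture)
print(frames.shape)  # expected: (100, 256, 256, 3) with the default arguments

Note that load_video_from_url and detect_deepfake hand the frames tensor off through frames_tf.pkl on disk because the two Gradio callbacks share no in-process state; threading a gr.State value through the click() inputs and outputs would keep the tensor in memory and avoid one shared file being clobbered by concurrent sessions.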
requirements.txt
ADDED
@@ -0,0 +1,6 @@
gradio==4.31.3
keras==3.1.1
numpy==1.26.4
pytube==15.0.0
tensorflow==2.16.1
tensorflow_intel==2.16.1
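To run the demo locally, install the pinned dependencies with pip install -r requirements.txt and start the interface with python app.py, keeping the MesonetRNN.keras weights file next to app.py since helpers.py loads it from the working directory. The tensorflow_intel pin suggests these versions were frozen on Windows, where the tensorflow package pulls in that wheel.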