Added temp file for audio and other folders for faces etc
app.py
CHANGED
@@ -15,6 +15,7 @@ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
 from pytorch_grad_cam.utils.image import show_cam_on_image
 import os
 import warnings
+import tempfile
 import glob
 from concurrent.futures import ThreadPoolExecutor
 import multiprocessing
@@ -72,7 +73,7 @@ def audiopredict(audio):
     if audio is not None:
         audio_clip = load_audio(audio)
         ai_generated_probability = classify_audio_clip(audio_clip)
-        image_path = os.path.join("
+        image_path = os.path.join("./wave.jpg")
         image = Image.open(image_path)
         if ai_generated_probability < 0.5:
             return "Real", "The audio is likely to be Real", "No EXIF data found in the audio", image
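The changed image_path line passes a single argument to os.path.join, which simply returns that argument, so the path resolves to the bundled wave.jpg added in this commit. A one-line check of that behavior (illustrative only, not part of app.py):

import os
# os.path.join with a single argument is a no-op, so image_path is just "./wave.jpg"
assert os.path.join("./wave.jpg") == "./wave.jpg"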
@@ -81,43 +82,41 @@ def audiopredict(audio):
 
 # Video Input Code
 def save_video(video_path):
-    [previous save_video body truncated in this diff view; only val = "Deepfake" is legible]
+    # Create a temporary directory to save the video
+    with tempfile.TemporaryDirectory() as temp_dir:
+        # Extract filename from path
+        filename = os.path.basename(video_path)
+
+        # Save video to the temporary folder
+        temp_video_path = os.path.join(temp_dir, filename)
+        with open(temp_video_path, "wb") as f:
+            f.write(open(video_path, "rb").read())
+
+        # Process frames, select faces, and perform deepfake identification
+        textoutput, exif, face_with_mask = process_video(temp_dir, filename)
+        print(textoutput)
+        string = textoutput
+
+        # Extract percentages and convert them to floats
+        percentages = re.findall(r"(\d+\.\d+)%", string)
+        real_percentage = float(percentages[0])
+        fake_percentage = float(percentages[1])
+
+        # Determine which percentage is higher
+        if real_percentage > fake_percentage:
+            print("Real")
+            val = "Real"
+        else:
+            print("Fake")
+            val = "Deepfake"
 
-
     return val, textoutput, exif, face_with_mask
 
 def process_video(video_folder, video_filename):
     # Additional Processing (Frames, Faces, Deepfake Identification)
-    frames_base_dir = "
-    faces_base_dir = "
-    selected_faces_base_dir = "
+    frames_base_dir = "./frames"
+    faces_base_dir = "./faces"
+    selected_faces_base_dir = "./selected_faces"
 
     # Find the latest video
     video_path = os.path.join(video_folder, video_filename)
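The new save_video copies the uploaded file byte-for-byte into a tempfile.TemporaryDirectory and then pulls two percentages out of the textoutput string returned by process_video. A minimal self-contained sketch of that flow, with two stated assumptions: shutil.copy stands in for the manual open/read/write above, and the textoutput value shown is a hypothetical format, since the actual wording produced by process_video is not part of this diff:

import os
import re
import shutil
import tempfile

def save_video_sketch(video_path):
    # Work inside a throwaway directory that is deleted when the block exits
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_video_path = os.path.join(temp_dir, os.path.basename(video_path))
        shutil.copy(video_path, temp_video_path)  # stand-in for the manual byte copy

        # Hypothetical stand-in for process_video(temp_dir, filename); the regex
        # assumes two decimal percentages, real first and fake second
        textoutput = "Real: 37.50%  Fake: 62.50%"
        percentages = re.findall(r"(\d+\.\d+)%", textoutput)
        real_percentage, fake_percentage = float(percentages[0]), float(percentages[1])
        return "Real" if real_percentage > fake_percentage else "Deepfake"

Note that re is used by the added lines but no import re appears in this commit, so it is presumably already imported elsewhere in app.py.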
@@ -213,7 +212,7 @@ def identify_deepfake(selected_faces_dir):
     model = InceptionResnetV1(pretrained="vggface2", classify=True, num_classes=1, device=DEVICE)
 
     # Load the model checkpoint
-    checkpoint_path = "
+    checkpoint_path = "./resnetinceptionv1_epoch_32.pth"  # Update this path
     checkpoint = torch.load(checkpoint_path, map_location=DEVICE)
     model.load_state_dict(checkpoint['model_state_dict'])
     model.to(DEVICE)
@@ -325,7 +324,7 @@ def predictimage(input_image: Image.Image):
         device=DEVICE
     )
 
-    checkpoint = torch.load("
+    checkpoint = torch.load("./resnetinceptionv1_epoch_32.pth", map_location=torch.device('cpu'))
     model.load_state_dict(checkpoint['model_state_dict'])
     model.to(DEVICE)
     model.eval()
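Both checkpoint hunks point at the same file, ./resnetinceptionv1_epoch_32.pth, which is now expected to sit next to app.py. A self-contained sketch of that loading pattern, assuming the model class comes from facenet_pytorch (consistent with the InceptionResnetV1 constructor arguments shown above); the rest mirrors the diff:

import torch
from facenet_pytorch import InceptionResnetV1  # assumed source of InceptionResnetV1

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

model = InceptionResnetV1(pretrained="vggface2", classify=True, num_classes=1, device=DEVICE)
# Load the weights onto the CPU first, then move the model to the target device
checkpoint = torch.load("./resnetinceptionv1_epoch_32.pth", map_location=torch.device("cpu"))
model.load_state_dict(checkpoint["model_state_dict"])
model.to(DEVICE)
model.eval()

Loading with map_location on the CPU keeps the Space usable when no GPU is available; model.to(DEVICE) then moves the weights to whatever device is actually present.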
wave.jpg
ADDED