NeuralFalcon commited on
Commit
fae75a0
·
verified ·
1 Parent(s): cf8426c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +246 -0
app.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import time
3
+ from matplotlib import text
4
+ import mediapipe as mp
5
+ from mediapipe.tasks.python import vision
6
+ import numpy as np
7
+ from mediapipe import solutions
8
+ from mediapipe.framework.formats import landmark_pb2
9
+ from utils import mask_overlay
10
+
11
def draw_landmarks_on_image(rgb_image, detection_result):
    """Render the MediaPipe face-mesh tesselation, contours and iris overlays.

    Args:
        rgb_image: HxWx3 RGB frame as a numpy array.
        detection_result: FaceLandmarker result exposing `face_landmarks`.

    Returns:
        A new annotated copy of `rgb_image`; the input array is not modified.
    """
    annotated = np.copy(rgb_image)

    for landmarks in detection_result.face_landmarks:
        # Convert task-API landmarks into the protobuf list that the classic
        # `solutions.drawing_utils` helpers expect.
        proto = landmark_pb2.NormalizedLandmarkList()
        proto.landmark.extend(
            landmark_pb2.NormalizedLandmark(x=pt.x, y=pt.y, z=pt.z)
            for pt in landmarks
        )

        # (connection set, per-connection drawing style) pairs, drawn in order.
        overlay_specs = [
            (mp.solutions.face_mesh.FACEMESH_TESSELATION,
             mp.solutions.drawing_styles.get_default_face_mesh_tesselation_style()),
            (mp.solutions.face_mesh.FACEMESH_CONTOURS,
             mp.solutions.drawing_styles.get_default_face_mesh_contours_style()),
            (mp.solutions.face_mesh.FACEMESH_IRISES,
             mp.solutions.drawing_styles.get_default_face_mesh_iris_connections_style()),
        ]
        for connections, style in overlay_specs:
            solutions.drawing_utils.draw_landmarks(
                image=annotated,
                landmark_list=proto,
                connections=connections,
                landmark_drawing_spec=None,
                connection_drawing_spec=style,
            )

    return annotated
44
+
45
def mediapipe_config():
    """Build a MediaPipe FaceLandmarker configured for per-frame video use.

    Loads model weights from ``face_landmarker.task`` in the working
    directory and runs in VIDEO mode, so callers must pass monotonically
    increasing timestamps to ``detect_for_video``.
    """
    model_path = "face_landmarker.task"
    options = mp.tasks.vision.FaceLandmarkerOptions(
        base_options=mp.tasks.BaseOptions(model_asset_path=model_path),
        running_mode=mp.tasks.vision.RunningMode.VIDEO,
    )
    return mp.tasks.vision.FaceLandmarker.create_from_options(options)
57
+
58
# Single shared FaceLandmarker instance, created once at import time and
# reused by add_mask() for every frame.
landmarker = mediapipe_config()
59
+
60
def face_point(results, frame):
    """Convert normalized face landmarks to pixel coordinates.

    Args:
        results: FaceLandmarker result exposing `face_landmarks`.
        frame: HxWx3 image whose dimensions scale the normalized points.

    Returns:
        One list per detected face, each containing `[index, x, y]` entries
        in pixel units; empty list when no face was detected.
    """
    height, width, _ = frame.shape
    faces = []
    if results.face_landmarks:
        for landmarks in results.face_landmarks:
            faces.append(
                [[idx, int(pt.x * width), int(pt.y * height)]
                 for idx, pt in enumerate(landmarks)]
            )
    return faces
72
+
73
def letterbox(image, target_width, target_height):
    """Resize `image` to fit the target box, preserving aspect ratio.

    The scaled image is centered on a black canvas of exactly
    (target_height, target_width, 3); leftover space becomes padding.
    """
    src_h, src_w = image.shape[:2]
    # Largest uniform scale that keeps both dimensions inside the target.
    scale = min(target_width / src_w, target_height / src_h)
    new_w = int(src_w * scale)
    new_h = int(src_h * scale)
    shrunk = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA)
    canvas = np.zeros((target_height, target_width, 3), dtype=np.uint8)
    pad_x = (target_width - new_w) // 2
    pad_y = (target_height - new_h) // 2
    canvas[pad_y:pad_y + new_h, pad_x:pad_x + new_w] = shrunk
    return canvas
84
+
85
+
86
+
87
+ import subprocess
88
+ import os
89
+ import shutil
90
+ import os, shutil, subprocess
91
+
92
def add_audio(input_video, mask_video, save_video="final.mp4"):
    """Mux the audio track of `input_video` onto `mask_video` via ffmpeg.

    Extracts the source audio to a temporary WAV, then remuxes it with the
    masked video stream (video copied, audio re-encoded to AAC).

    Args:
        input_video: Path of the original video whose audio is reused.
        mask_video: Path of the silent, masked video.
        save_video: Destination path for the merged result.

    Returns:
        True when the merge succeeded; False otherwise. On failure the
        masked video is copied to `save_video` as a best-effort fallback.
    """
    audio_file = os.path.abspath("./temp/temp_audio.wav")
    try:
        os.makedirs("./temp", exist_ok=True)

        # Normalize all paths for ffmpeg (Windows safe)
        input_video = os.path.normpath(os.path.abspath(input_video))
        mask_video = os.path.normpath(os.path.abspath(mask_video))
        save_video = os.path.normpath(os.path.abspath(save_video))

        # Step 1: Extract WAV audio
        extract_cmd = [
            "ffmpeg", "-y", "-i", input_video, "-vn",
            "-acodec", "pcm_s16le", "-ar", "44100", "-ac", "2",
            audio_file, "-hide_banner", "-loglevel", "error"
        ]
        subprocess.run(extract_cmd, check=True)

        # Validate: ffmpeg can exit 0 yet produce nothing for silent inputs.
        if not os.path.exists(audio_file) or os.path.getsize(audio_file) == 0:
            raise Exception("No audio track extracted")

        # Step 2: Merge WAV + video (video stream copied, audio -> AAC)
        merge_cmd = [
            "ffmpeg", "-y", "-i", mask_video, "-i", audio_file,
            "-c:v", "copy", "-c:a", "aac", "-shortest",
            save_video, "-hide_banner", "-loglevel", "error"
        ]
        subprocess.run(merge_cmd, check=True)
        return True

    except Exception as e:
        print("⚠️ Audio merge failed:", e)
        try:
            shutil.copy(mask_video, save_video)  # fallback: ship silent video
        except Exception as e2:
            print("❌ Fallback copy failed:", e2)
        return False

    finally:
        # BUG FIX: the temp WAV was only removed on the success path; a
        # failure during the merge step leaked it. Clean up unconditionally.
        # (Also removed the unreachable duplicate `return False`.)
        if os.path.exists(audio_file):
            try:
                os.remove(audio_file)
            except OSError:
                pass
133
+
134
+
135
+
136
def add_mask(upload_video,
             mask_name="Blue Mask", mask_up=10, mask_down=10):
    """Overlay the selected mask on every detected face in a video.

    Args:
        upload_video: Path of the input video file.
        mask_name: Mask image to overlay (interpreted by `utils.mask_overlay`).
        mask_up: Extra extension of the mask above the face, in pixels.
        mask_down: Extra extension of the mask below the face, in pixels.

    Returns:
        (video_path, download_path) — both point at the masked video with
        audio when the merge succeeded, or at the silent masked video
        otherwise (same tuple shape for the Gradio Video + File outputs).

    Raises:
        RuntimeError: when the input video cannot be opened.
    """
    output_video = "./temp/mask.mp4"
    os.makedirs("./temp", exist_ok=True)

    cap = cv2.VideoCapture(upload_video)
    if not cap.isOpened():
        # BUG FIX: exit() killed the whole server process (fatal inside a
        # Gradio callback); raise instead so only this request fails.
        raise RuntimeError(f"❌ Cannot access video file: {upload_video}")

    input_fps = int(cap.get(cv2.CAP_PROP_FPS))
    if input_fps <= 0 or input_fps > 120:  # sanity check
        input_fps = 25  # default fallback

    output_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    output_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(output_video, fourcc, input_fps,
                          (output_width, output_height))

    # NOTE: removed the per-frame draw_landmarks_on_image() call — its result
    # fed only the commented-out live-preview code, wasting work per frame.
    last_timestamp_ms = -1
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.flip(frame, 1)  # mirror, as in the original pipeline
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame_rgb)

        # BUG FIX: detect_for_video() requires strictly increasing timestamps,
        # but CAP_PROP_POS_MSEC can return 0 or repeat values for some
        # containers — clamp to keep the sequence monotonic.
        timestamp_ms = max(int(cap.get(cv2.CAP_PROP_POS_MSEC)),
                           last_timestamp_ms + 1)
        last_timestamp_ms = timestamp_ms
        results = landmarker.detect_for_video(mp_image, timestamp_ms)

        faces = face_point(results, frame)
        if faces:
            masked_frame = mask_overlay(frame, faces, mask_up, mask_down, mask_name)
        else:
            masked_frame = frame  # no face detected: keep the frame unmasked
        out.write(masked_frame)

    print("Releasing resources...")
    cap.release()
    out.release()
    cv2.destroyAllWindows()

    # BUG FIX: splitext() keeps the directory part, so for an absolute upload
    # path "./temp/" + <abs path> pointed into a non-existent folder and the
    # audio merge always fell back. Use only the file name inside ./temp.
    stem = os.path.splitext(os.path.basename(upload_video))[0]
    save_video_path = os.path.join("./temp", stem + "_mask.mp4")
    success = add_audio(upload_video, output_video, save_video_path)
    if success:
        print(f"✅ Masked video saved to {save_video_path}")
        return save_video_path, save_video_path
    print("❌ Failed to save masked video.")
    return output_video, output_video
213
+
214
+
215
+ # add_mask("input.mp4", "output_video.mp4", mask_up=10, mask_down=10, mask_name="Blue Mask")
216
+
217
+ import gradio as gr
218
+
219
def ui():
    """Assemble the Gradio interface for the mask-overlay app."""
    mask_choices = ["Front Man Mask", "Guards Mask", "Red Mask", "Blue Mask"]

    with gr.Blocks() as demo:
        gr.Markdown("## Hide Face Using Squid Game Masks")

        with gr.Row():
            with gr.Column():
                upload = gr.Video(label="Upload Video")
                mask_dropdown = gr.Dropdown(choices=mask_choices, label="Select Mask")
                apply_btn = gr.Button("Apply Mask")

                with gr.Accordion('Mask Settings', open=False):
                    up_slider = gr.Slider(minimum=0, maximum=100, label="Mask Up", value=10)
                    down_slider = gr.Slider(minimum=0, maximum=100, label="Mask Down", value=10)

            with gr.Column():
                result_video = gr.Video(label="Output Video")
                result_file = gr.File(label="Download Video")

        # Argument order must match add_mask(upload_video, mask_name, mask_up, mask_down).
        apply_btn.click(
            add_mask,
            inputs=[upload, mask_dropdown, up_slider, down_slider],
            outputs=[result_video, result_file],
        )

    return demo
244
+
245
# Build the Gradio app and start the server at import time (Hugging Face
# Spaces executes app.py directly, so launch() must run unconditionally).
demo=ui()
demo.launch()