Update app.py
Browse files
app.py
CHANGED
|
@@ -22,6 +22,27 @@ except (ImportError, AttributeError):
|
|
| 22 |
|
| 23 |
# --- Helper Functions ---
|
| 24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
def get_landmarks(img, landmark_step=1):
|
| 26 |
if img is None or face_mesh is None:
|
| 27 |
return None
|
|
@@ -105,50 +126,61 @@ def morph_faces(img1, img2, alpha, dim, step):
|
|
| 105 |
return (out*255).astype(np.uint8)
|
| 106 |
|
| 107 |
|
| 108 |
-
def process_video(video_path,
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
ret,frame=cap.read()
|
| 122 |
if not ret: break
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
# --- Gradio App ---
|
| 131 |
-
css="""video, img{object-fit:contain!important;}"""
|
| 132 |
with gr.Blocks(css=css) as iface:
|
| 133 |
-
gr.Markdown("#
|
| 134 |
-
gr.Markdown("Click 'Generate Morph' and watch the progress bar during processing.")
|
| 135 |
with gr.Row():
|
| 136 |
-
vid=gr.Video(label='Input Video')
|
| 137 |
-
|
| 138 |
with gr.Row():
|
| 139 |
-
res=gr.Dropdown([256,384,512,768],value=512,label='Resolution')
|
| 140 |
-
step=gr.Slider(1,4,value=1,step=1,label='Landmark Sub-sampling')
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
btn.click(
|
| 146 |
fn=process_video,
|
| 147 |
-
inputs=[vid,
|
| 148 |
-
outputs=
|
| 149 |
show_progress=True
|
| 150 |
)
|
| 151 |
-
gr.Markdown("---\n*
|
| 152 |
|
| 153 |
if __name__=='__main__':
|
| 154 |
iface.launch(debug=True)
|
|
|
|
|
|
| 22 |
|
| 23 |
# --- Helper Functions ---
|
| 24 |
|
| 25 |
+
|
| 26 |
+
def cut_and_feather(img, feather):
    """Cut out the face region of a BGR image and feather the mask edge.

    Runs the module-level MediaPipe ``face_mesh`` on *img*, fills the convex
    hull of the detected landmarks into a binary mask, optionally blurs the
    mask edge, and alpha-composites the face over a black background.

    Args:
        img: BGR uint8 image, shape (H, W, 3).
        feather: feather radius in pixels; 0 disables edge blurring.

    Returns:
        Tuple ``(out, mask)`` — the feathered BGR face cut-out and the uint8
        mask. When no face is detected, an all-black image and an all-zero
        mask are returned.
    """
    h, w = img.shape[:2]
    mask = np.zeros((h, w), dtype=np.uint8)
    # MediaPipe expects RGB; the rest of the pipeline works in BGR.
    results = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    if not results.multi_face_landmarks:
        # BUG FIX: always return an (image, mask) pair. The previous code
        # returned only the image here, but every caller unpacks two values
        # (`mod_ref, _ = ...`, `frm_mod, mask = ...`), so a no-face frame
        # broke the unpacking.
        return np.zeros_like(img), mask
    lm = results.multi_face_landmarks[0]
    # Landmarks are normalized [0,1] coordinates; scale to pixel space.
    pts = np.array([(int(p.x * w), int(p.y * h)) for p in lm.landmark], np.int32)
    hull = cv2.convexHull(pts)
    cv2.fillConvexPoly(mask, hull, 255)
    # Feather the hard hull edge with an odd-sized Gaussian kernel.
    k = int(feather)
    if k > 0:
        mask = cv2.GaussianBlur(mask, (k * 2 + 1, k * 2 + 1), 0)
    fg = cv2.bitwise_and(img, img, mask=mask)
    bg = np.zeros_like(img)
    # Use the (blurred) mask as a per-pixel alpha to blend face over black.
    alpha = mask.astype(np.float32) / 255.0
    out = (fg.astype(np.float32) * alpha[..., None]
           + bg.astype(np.float32) * (1 - alpha[..., None])).astype(np.uint8)
    return out, mask
|
| 45 |
+
|
| 46 |
def get_landmarks(img, landmark_step=1):
|
| 47 |
if img is None or face_mesh is None:
|
| 48 |
return None
|
|
|
|
| 126 |
return (out*255).astype(np.uint8)
|
| 127 |
|
| 128 |
|
| 129 |
+
def process_video(video_path, ref_img, trans, res, step, feather):
    """Morph every frame of a video toward a reference face and write an MP4.

    Each frame and the reference image are face-cut and feathered with
    ``cut_and_feather``, resized to ``res`` x ``res``, then blended with
    ``morph_faces``.

    Args:
        video_path: path to the input video file.
        ref_img: reference face as an RGB numpy array (Gradio image format).
        trans: transition level in [-1, 1]; mapped to a morph alpha in [0, 1].
        res: output square resolution (pixels).
        step: landmark sub-sampling step forwarded to ``morph_faces``.
        feather: feather radius forwarded to ``cut_and_feather``.

    Returns:
        Tuple of (output video path, first modified frame RGB,
        modified reference RGB, first morphed frame RGB).
    """
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24  # some containers report 0 fps
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Loop-invariant: map the [-1, 1] slider to a [0, 1] alpha exactly once
    # (the old code recomputed this for every frame, and twice for frame 0).
    alpha = float(np.clip((trans + 1) / 2, 0, 1))
    # Prepare the modified reference (Gradio hands us RGB; OpenCV wants BGR).
    ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
    mod_ref, _ = cut_and_feather(ref_bgr, feather)
    mod_ref = cv2.resize(mod_ref, (res, res))
    # Temporary output video.
    tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
    out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (res, res))
    first_mod_frame = None
    first_morphed = None
    for i in range(total):
        ret, frame = cap.read()
        if not ret:
            break
        frm_mod, _ = cut_and_feather(frame, feather)
        frm_mod = cv2.resize(frm_mod, (res, res))
        mor = morph_faces(frm_mod, mod_ref, alpha, res, step)
        if i == 0:
            first_mod_frame = frm_mod.copy()
            # Reuse this frame's morph result; the old code morphed frame 0
            # a second time with identical inputs.
            first_morphed = mor
        out_vid.write(mor)
    cap.release()
    out_vid.release()
    # BUG FIX: a video with no decodable frames left these as None and the
    # old code crashed inside cv2.cvtColor(None, ...). Fall back to blanks.
    if first_mod_frame is None:
        blank = np.zeros((res, res, 3), dtype=np.uint8)
        first_mod_frame = blank
        first_morphed = blank
    # Convert back to RGB for Gradio display.
    return (tmp_vid,
            cv2.cvtColor(first_mod_frame, cv2.COLOR_BGR2RGB),
            cv2.cvtColor(mod_ref, cv2.COLOR_BGR2RGB),
            cv2.cvtColor(first_morphed, cv2.COLOR_BGR2RGB))
|
| 155 |
+
|
| 156 |
|
| 157 |
# --- Gradio App ---
# Keep video/image previews letterboxed instead of cropped.
css = """video, img { object-fit: contain !important; }"""
with gr.Blocks(css=css) as iface:
    gr.Markdown("# Enhanced Face Morph with Feathering")
    with gr.Row():
        vid = gr.Video(label='Input Video')
        ref = gr.Image(type='numpy', label='Reference Face Image')
    with gr.Row():
        res = gr.Dropdown([256, 384, 512, 768], value=512, label='Resolution')
        step = gr.Slider(1, 4, value=1, step=1, label='Landmark Sub-sampling')
        feather = gr.Slider(0, 50, value=10, step=1, label='Feather Radius')
        trans = gr.Slider(-1.0, 1.0, value=0.0, step=0.05, label='Transition Level')
    btn = gr.Button('Generate Morph 🚀')
    # NOTE: removed the dead `progress = gr.Progress()` line — gr.Progress is
    # not a layout component; instantiating it inside Blocks is a no-op. The
    # built-in progress display is driven by show_progress=True below.
    out_vid = gr.Video(label='Morphed Video')
    out_mod_frame = gr.Image(label='Modified Frame[0]')
    out_mod_ref = gr.Image(label='Modified Reference')
    out_morph0 = gr.Image(label='Morphed Frame[0]')

    btn.click(
        fn=process_video,
        inputs=[vid, ref, trans, res, step, feather],
        outputs=[out_vid, out_mod_frame, out_mod_ref, out_morph0],
        show_progress=True
    )
    gr.Markdown("---\n*Workflow: cut, feather, morph, composite.*")

if __name__ == '__main__':
    iface.launch(debug=True)
|
| 186 |
+
|