import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
import time
import tempfile
import os
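
# The faceflux module source is pulled from an environment variable (e.g. a
# Hugging Face Space secret) and written to faceflux.py before it is imported below.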
faceflux_model = os.environ.get("faceflux_model")
try:
    with open("faceflux.py", "w") as file_object:
        file_object.write(faceflux_model)
    print("Successfully wrote code to faceflux.py")
except (IOError, TypeError) as e:
    # TypeError is raised by write() when the env var is unset (faceflux_model is None).
    print(f"Error writing to file: {e}")
import faceflux as ff


def process_video(
    video_path, ref_img, trans, res, step, feather_pct,
    progress=gr.Progress()
):
    # --- Initialization ---
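    # The Quality slider runs 1 (fastest) to 4 (best); invert it so a higher
    # setting passes a smaller step value to ff.morph_faces.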
    step = 5 - step
    padding_pct = 0.24
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    progress(0.0, desc="Initializing")

    # --- Prepare masked reference ---
    ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
    mask_ref, ref_box = ff.get_face_mask_box(ref_bgr, feather_pct, padding_pct)
    if mask_ref is None:
        progress(None)  # hide on error
        return None  # the click handler has a single output, so return a single value
    x_r, y_r, w_r, h_r = ref_box
    ref_cut = ref_bgr[y_r:y_r+h_r, x_r:x_r+w_r]
    mask_ref_norm = mask_ref.astype(np.float32)[..., None] / 255.0
    ref_masked = (ref_cut.astype(np.float32) * mask_ref_norm).astype(np.uint8)
    ref_morph = cv2.resize(ref_masked, (res, res))
    progress(0.1, desc="Reference ready")

    # --- Output setup ---
    w_o = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h_o = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
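    # delete=False keeps the temporary .mp4 on disk so Gradio can serve it after the function returns.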
    tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
    out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_o, h_o))

    # --- Frame-by-frame processing ---
    for i in range(total):
        ret, frame = cap.read()
        if not ret:
            break
        progress(0.1 + 0.8 * (i / total), desc=f"Processing frame {i+1}/{total}")
        mask_roi, box = ff.get_face_mask_box(frame, feather_pct, padding_pct)
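        # No face detected in this frame: write it through unchanged.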
        if mask_roi is None:
            out_vid.write(frame)
            continue
        x, y, w, h = box
        crop = frame[y:y+h, x:x+w]
        crop_resized = cv2.resize(crop, (res, res))
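        # Map the Strength slider value (trans) from [-1, 1] to a blend factor in [0, 1].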
        alpha = float(np.clip((trans+1)/2, 0, 1))
        mor = ff.morph_faces(crop_resized, ref_morph, alpha, res, step)
        mor_back = cv2.resize(mor, (w, h))
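        # Composite the morphed face back over the original frame region, using the
        # feathered mask as a per-pixel alpha.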
        mask_n = (mask_roi.astype(np.float32)[..., None] / 255.0)
        region = frame[y:y+h, x:x+w].astype(np.float32)
        blended = region * (1-mask_n) + mor_back.astype(np.float32) * mask_n
        frame[y:y+h, x:x+w] = blended.astype(np.uint8)
        out_vid.write(frame)

    cap.release()
    out_vid.release()
    progress(1.0, desc="Done")
    return tmp_vid
# --- Gradio App ---
css = """
video, img { object-fit: contain !important; }
@import url('https://fonts.googleapis.com/css2?family=Bungee+Hairline&family=Climate+Crisis&family=Sulphur+Point:wght@300;400;700&display=swap');
.sulphur-point-title {
font-family: 'Sulphur Point', sans-serif;
font-weight: 700;
font-size: 2.5em;
margin-bottom: 0.2em;
}
.description-text {
font-size: 1em;
line-height: 1.5;
}
.socials {
font-size: 0.9em;
margin-top: 1em;
color: #555;
}
"""
with gr.Blocks(css=css) as iface:
    gr.HTML("<div class='sulphur-point-title'>FaceFlux</div>")
    gr.Markdown(
        """
<div class='description-text'>
<b>Super Fast Face Swap</b> using a CPU-friendly morphing algorithm.
<br><br>
<b>How to Use:</b>
- Upload a video (ideally one where the face is visible and not too close to the camera).
- Upload a clear reference image of the face to swap onto.
- <b>Strength:</b> Controls how much of the reference face is blended in.
- <b>Quality:</b> Controls the precision of face alignment (higher is slower but better).
- <b>Feather:</b> Smooths the edge blending.
- <b>Face Resolution:</b> Higher = sharper face, but slower.
<br><br>
<b>Note:</b> Works best on short clips or videos where the target face is small or mid-sized.
</div>
        """,
        elem_id="description"
    )
    with gr.Row():
        vid = gr.Video(label='Input Video')
        ref = gr.Image(type='numpy', label='Reference Image')
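
    # Note: process_video maps Strength (trans) through (trans + 1) / 2, so the
    # -0.35 to -0.15 range below corresponds to a blend factor of roughly 0.33-0.43.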
    with gr.Row():
        res = gr.Dropdown([256, 384, 512, 768], value=512, label='Face Resolution')
        step = gr.Slider(1, 4, value=1, step=1, label='Quality')
        feather = gr.Slider(0.12, 0.24, value=0.12, step=0.01, label='Feather (%)')
        trans = gr.Slider(-0.35, -0.15, value=-0.35, step=0.01, label='Strength')
    btn = gr.Button('Generate Morph 🚀')
    out_vid = gr.Video(label='Morphed Video')
    btn.click(
        fn=process_video,
        inputs=[vid, ref, trans, res, step, feather],
        outputs=[out_vid],
        show_progress=True
    )
    gr.Markdown(
        """
<div class='socials'>
<b>Contact & Socials:</b><br>
📷 Instagram: <a href="https://instagram.com/nihal_gazi_io" target="_blank">@nihal_gazi_io</a> <br>
🐦 X (Twitter): <a href="https://x.com/NihalGazi_" target="_blank">@NihalGazi_</a> <br>
💬 Discord: nihal_gazi_io <br>
📧 Email: [email protected]
</div>
        """,
        elem_id="socials"
    )
iface.queue().launch(debug=True)