import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
import time
import tempfile
import os
# The faceflux module source is provided via an environment variable and
# written to disk here so it can be imported as a regular module below.
faceflux_model = os.environ.get("faceflux_model")
try:
    with open("faceflux.py", "w") as file_object:
        file_object.write(faceflux_model)
    print("Successfully wrote code to faceflux.py")
except (IOError, TypeError) as e:
    # TypeError covers faceflux_model being None when the env var is unset.
    print(f"Error writing to file: {e}")
import faceflux as ff
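
# For reference, the interface this script assumes faceflux exposes, inferred
# from the calls below rather than from any documented API:
#   ff.get_face_mask_box(bgr_img, feather_pct, padding_pct) -> (mask, (x, y, w, h))
#       mask: uint8 feathered face mask cropped to the box, or None if no face is found
#   ff.morph_faces(src_crop, ref_crop, alpha, res, step) -> morphed res x res BGR image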
def process_video(
    video_path, ref_img, trans, res, step, feather_pct,
    progress=gr.Progress()
):
    # --- Initialization ---
    step = 5 - step  # invert the Quality slider (1-4) so higher Quality gives a smaller internal morph step
    padding_pct = 0.24
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24  # fall back to 24 fps if the container reports 0
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    progress(0.0, desc="Initializing")
    # --- Prepare masked reference ---
    ref_bgr = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
    mask_ref, ref_box = ff.get_face_mask_box(ref_bgr, feather_pct, padding_pct)
    if mask_ref is None:
        progress(None)  # hide on error
        cap.release()
        return None  # single output component, so return a single value
    x_r, y_r, w_r, h_r = ref_box
    ref_cut = ref_bgr[y_r:y_r+h_r, x_r:x_r+w_r]
    mask_ref_norm = mask_ref.astype(np.float32)[..., None] / 255.0
    ref_masked = (ref_cut.astype(np.float32) * mask_ref_norm).astype(np.uint8)
    ref_morph = cv2.resize(ref_masked, (res, res))
    progress(0.1, desc="Reference ready")
    # --- Output setup ---
    w_o = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h_o = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    tmp_vid = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
    out_vid = cv2.VideoWriter(tmp_vid, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w_o, h_o))
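    # Note: cv2.VideoWriter writes video frames only, so any audio track in the
    # input video is not carried over to the output file.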
    # --- Frame-by-frame processing ---
    for i in range(total):
        ret, frame = cap.read()
        if not ret:
            break
        progress(0.1 + 0.8 * (i / total), desc=f"Processing frame {i+1}/{total}")
        mask_roi, box = ff.get_face_mask_box(frame, feather_pct, padding_pct)
        if mask_roi is None:
            # No face detected in this frame: pass it through unchanged.
            out_vid.write(frame)
            continue
        x, y, w, h = box
        crop = frame[y:y+h, x:x+w]
        crop_resized = cv2.resize(crop, (res, res))
        alpha = float(np.clip((trans + 1) / 2, 0, 1))  # map the Strength slider from [-1, 1] to a [0, 1] morph weight
        mor = ff.morph_faces(crop_resized, ref_morph, alpha, res, step)
        mor_back = cv2.resize(mor, (w, h))
        mask_n = (mask_roi.astype(np.float32)[..., None] / 255.0)
        region = frame[y:y+h, x:x+w].astype(np.float32)
        blended = region * (1 - mask_n) + mor_back.astype(np.float32) * mask_n
        frame[y:y+h, x:x+w] = blended.astype(np.uint8)
        out_vid.write(frame)
    cap.release()
    out_vid.release()
    progress(1.0, desc="Done")
    return tmp_vid
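
# A minimal sketch of calling process_video directly, outside the Gradio UI
# (hypothetical file names, shown for local testing only):
#
#   ref = cv2.cvtColor(cv2.imread("reference.jpg"), cv2.COLOR_BGR2RGB)
#   out_path = process_video("input.mp4", ref, trans=-0.35, res=512, step=1, feather_pct=0.12)
#   print(out_path)  # path of the temporary .mp4 produced by the function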
# --- Gradio App ---
css = """
/* @import must precede all other rules, otherwise the fonts are not loaded. */
@import url('https://fonts.googleapis.com/css2?family=Bungee+Hairline&family=Climate+Crisis&family=Sulphur+Point:wght@300;400;700&display=swap');
video, img { object-fit: contain !important; }
.sulphur-point-title {
    font-family: 'Sulphur Point', sans-serif;
    font-weight: 700;
    font-size: 2.5em;
    margin-bottom: 0.2em;
}
.description-text {
    font-size: 1em;
    line-height: 1.5;
}
.socials {
    font-size: 0.9em;
    margin-top: 1em;
    color: #555;
}
"""
with gr.Blocks(css=css) as iface:
    gr.HTML("<div class='sulphur-point-title'>FaceFlux</div>")
    gr.Markdown(
        """
<div class='description-text'>
<b>Super Fast Face Swap</b> using a CPU-friendly morphing algorithm.
<br><br>
<b>How to Use:</b>
<ul>
<li>Upload a video (ideally one where the face is clearly visible and not too close to the camera).</li>
<li>Upload a clear reference image of the face to swap onto the video.</li>
<li><b>Strength:</b> Controls how much of the reference face is blended in.</li>
<li><b>Quality:</b> Controls the precision of face alignment (higher is slower but better).</li>
<li><b>Feather:</b> Smooths the edge blending.</li>
<li><b>Face Resolution:</b> Higher = sharper face, but slower.</li>
</ul>
<b>Note:</b> Works best on short clips or videos where the target face is small or mid-sized.
</div>
        """,
        elem_id="description"
    )
    with gr.Row():
        vid = gr.Video(label='Input Video')
        ref = gr.Image(type='numpy', label='Reference Image')
    with gr.Row():
        res = gr.Dropdown([256, 384, 512, 768], value=512, label='Face Resolution')
        step = gr.Slider(1, 4, value=1, step=1, label='Quality')
        feather = gr.Slider(0.12, 0.24, value=0.12, step=0.01, label='Feather (%)')
        trans = gr.Slider(-0.35, -0.15, value=-0.35, step=0.01, label='Strength')
    btn = gr.Button('Generate Morph 🚀')
    out_vid = gr.Video(label='Morphed Video')
    btn.click(
        fn=process_video,
        inputs=[vid, ref, trans, res, step, feather],
        outputs=[out_vid],
        show_progress=True
    )
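    # The inputs list is positional: [vid, ref, trans, res, step, feather] must
    # line up with process_video(video_path, ref_img, trans, res, step, feather_pct).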
    gr.Markdown(
        """
<div class='socials'>
<b>Contact & Socials:</b><br>
📷 Instagram: <a href="https://instagram.com/nihal_gazi_io" target="_blank">@nihal_gazi_io</a> <br>
🐦 X (Twitter): <a href="https://x.com/NihalGazi_" target="_blank">@NihalGazi_</a> <br>
💬 Discord: nihal_gazi_io <br>
📧 Email: [email protected]
</div>
        """,
        elem_id="socials"
    )

iface.queue().launch(debug=True)