import os
import sys
import uuid
import logging
import base64
import shutil
from typing import Optional, Tuple

import gradio as gr
import spaces
import torch
import cv2
import numpy as np
from huggingface_hub import snapshot_download

# -----------------------------------------------------------------------------
# Environment for HF Spaces
# -----------------------------------------------------------------------------
os.environ.setdefault("GRADIO_TEMP_DIR", "/tmp/gradio")
os.environ.setdefault("TMPDIR", "/tmp")
os.makedirs(os.environ["GRADIO_TEMP_DIR"], exist_ok=True)
os.makedirs(os.environ["TMPDIR"], exist_ok=True)

# -----------------------------------------------------------------------------
# Config via environment variables (set these in your Space settings)
# -----------------------------------------------------------------------------
# Required (you uploaded these as separate model repos on HF):
# - FFHQFACEALIGNMENT_REPO (e.g., "yourname/FFHQFaceAlignment")
# - HAIRMAPPER_REPO (e.g., "yourname/HairMapper")
# - SD15_REPO (e.g., "yourname/stable-diffusion-v1-5")
# Optional:
# - TRAINED_MODEL_REPO (if you uploaded motion/control/ref ckpts as a repo)
# If TRAINED_MODEL_REPO is not provided, we fall back to the local "./pretrain".
FFHQFACEALIGNMENT_REPO = os.getenv("FFHQFACEALIGNMENT_REPO", "")
HAIRMAPPER_REPO = os.getenv("HAIRMAPPER_REPO", "")
SD15_REPO = os.getenv("SD15_REPO", "")
TRAINED_MODEL_REPO = os.getenv("TRAINED_MODEL_REPO", "")

# Prefer the official variable name, then fall back to HF_TOKEN for compatibility.
HF_AUTH_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")

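# Example Space configuration (Settings -> Variables and secrets). The repo
# names below are illustrative placeholders, not real repositories:
#
#   SD15_REPO              = "yourname/stable-diffusion-v1-5"
#   HAIRMAPPER_REPO        = "yourname/HairMapper"
#   FFHQFACEALIGNMENT_REPO = "yourname/FFHQFaceAlignment"
#   TRAINED_MODEL_REPO     = "yourname/stablehairv2-weights"  # optional
#   HF_TOKEN               = "hf_..."  # secret; only needed for private repos
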
# -----------------------------------------------------------------------------
# Utilities
# -----------------------------------------------------------------------------

def _ensure_symlink(src_dir: str, dst_path: str) -> str:
    """Create a directory symlink at dst_path pointing to src_dir, if one does not already exist.

    If symlink creation is unavailable, fall back to copying the directory tree.
    Returns the path that imports should use (dst_path if created, else src_dir).
    """
    try:
        if os.path.islink(dst_path) or os.path.isdir(dst_path):
            return dst_path
        os.symlink(src_dir, dst_path, target_is_directory=True)
        return dst_path
    except Exception:
        # Fallback: copy the contents (can be heavy; symlinks are preferred on HF Linux).
        try:
            if not os.path.exists(dst_path):
                os.makedirs(dst_path, exist_ok=True)
            for name in os.listdir(src_dir):
                src = os.path.join(src_dir, name)
                dst = os.path.join(dst_path, name)
                if os.path.exists(dst):
                    continue
                if os.path.isdir(src):
                    shutil.copytree(src, dst)
                else:
                    shutil.copy2(src, dst)
            return dst_path
        except Exception:
            # Give up and return the original source.
            return src_dir


def _find_model_root(path: str) -> str:
    """Given a snapshot path, return the directory containing model_index.json.

    Handles repos that nest the model folder (e.g., repo/stable-diffusion-v1-5/...).
    """
    if os.path.isfile(os.path.join(path, "model_index.json")):
        return path
    # Search one level deep for a folder with model_index.json.
    for name in os.listdir(path):
        cand = os.path.join(path, name)
        if os.path.isdir(cand) and os.path.isfile(os.path.join(cand, "model_index.json")):
            return cand
    # Fall back to the original path.
    return path

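# The layout _find_model_root handles, sketched with illustrative paths:
#
#   snapshot/                            <- no model_index.json at the root
#   snapshot/stable-diffusion-v1-5/
#       model_index.json                 <- this subfolder is returned
#       unet/  vae/  tokenizer/  ...
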
def _download_models() -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Download HF model repos and prepare local paths.

    Returns:
    - sd15_path: path to the Stable Diffusion v1-5 folder (with model_index.json)
    - hairmapper_dir: path to the local HairMapper folder (import root)
    - ffhq_dir: path to the local FFHQFaceAlignment folder (import root)
    """
    cache_dir = os.getenv("HF_HUB_CACHE", None)

    # 1) Stable Diffusion 1.5
    sd15_path = None
    if SD15_REPO:
        sd_snap = snapshot_download(
            repo_id=SD15_REPO,
            local_files_only=False,
            cache_dir=cache_dir,
            token=HF_AUTH_TOKEN,
        )
        sd15_path = _find_model_root(sd_snap)

    # 2) HairMapper
    hairmapper_dir = None
    if HAIRMAPPER_REPO:
        hm_snap = snapshot_download(
            repo_id=HAIRMAPPER_REPO,
            local_files_only=False,
            cache_dir=cache_dir,
            token=HF_AUTH_TOKEN,
        )
        # If the repo root contains a nested "HairMapper" folder, link to that subfolder.
        hm_src = hm_snap
        nested_hm = os.path.join(hm_snap, "HairMapper")
        if os.path.isdir(nested_hm) and (
            os.path.isfile(os.path.join(nested_hm, "hair_mapper_run.py"))
            or os.path.isdir(os.path.join(nested_hm, "mapper"))
        ):
            hm_src = nested_hm
        # Create a symlink so that imports like "from HairMapper..." work.
        hairmapper_dir = _ensure_symlink(hm_src, os.path.abspath("HairMapper"))
        parent = os.path.dirname(hairmapper_dir)
        if parent not in sys.path:
            sys.path.insert(0, parent)

    # 3) FFHQFaceAlignment
    ffhq_dir = None
    if FFHQFACEALIGNMENT_REPO:
        fa_snap = snapshot_download(
            repo_id=FFHQFACEALIGNMENT_REPO,
            local_files_only=False,
            cache_dir=cache_dir,
            token=HF_AUTH_TOKEN,
        )
        # If the repo root contains a nested "FFHQFaceAlignment" folder, link to that subfolder.
        fa_src = fa_snap
        nested_fa = os.path.join(fa_snap, "FFHQFaceAlignment")
        if os.path.isdir(nested_fa) and (
            os.path.isfile(os.path.join(nested_fa, "align.py"))
            or os.path.isdir(os.path.join(nested_fa, "lib"))
        ):
            fa_src = nested_fa
        # Create a symlink so that _maybe_align_image can import its modules.
        ffhq_dir = _ensure_symlink(fa_src, os.path.abspath("FFHQFaceAlignment"))
        parent = os.path.dirname(ffhq_dir)
        if parent not in sys.path:
            sys.path.insert(0, parent)

    # 4) Optional: trained model weights (motion/control/ref)
    if TRAINED_MODEL_REPO:
        tm_snap = snapshot_download(
            repo_id=TRAINED_MODEL_REPO,
            local_files_only=False,
            cache_dir=cache_dir,
            token=HF_AUTH_TOKEN,
        )
        # Symlink to ./trained_model so downstream code can load from there.
        _ = _ensure_symlink(tm_snap, os.path.abspath("trained_model"))

    return sd15_path, hairmapper_dir, ffhq_dir


# -----------------------------------------------------------------------------
# Lazy imports that rely on downloaded models/paths
# -----------------------------------------------------------------------------

def _import_inference_bits():
    from test_stablehairv2 import log_validation
    from test_stablehairv2 import UNet3DConditionModel, ControlNetModel, CCProjection
    from test_stablehairv2 import AutoTokenizer, CLIPVisionModelWithProjection, AutoencoderKL, UNet2DConditionModel
    from test_stablehairv2 import _maybe_align_image
    from HairMapper.hair_mapper_run import bald_head

    return (
        log_validation,
        UNet3DConditionModel,
        ControlNetModel,
        CCProjection,
        AutoTokenizer,
        CLIPVisionModelWithProjection,
        AutoencoderKL,
        UNet2DConditionModel,
        _maybe_align_image,
        bald_head,
    )


# -----------------------------------------------------------------------------
# Prepare models on startup
# -----------------------------------------------------------------------------
SD15_PATH, _, _ = _download_models()

# -----------------------------------------------------------------------------
# Gradio inference
# -----------------------------------------------------------------------------
with open("imgs/background.png", "rb") as f:
    _b64_bg = base64.b64encode(f.read()).decode()

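# Note: on ZeroGPU Spaces, the @spaces.GPU decorator below attaches a GPU only
# for the duration of each call; the module-level setup above runs without one.
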
@spaces.GPU
def inference(id_image, hair_image):
    # Require a GPU (HairMapper currently uses CUDA explicitly).
    if not torch.cuda.is_available():
        raise RuntimeError("This demo requires a GPU Space. Please enable a GPU in this Space.")

    (
        log_validation,
        UNet3DConditionModel,
        ControlNetModel,
        CCProjection,
        AutoTokenizer,
        CLIPVisionModelWithProjection,
        AutoencoderKL,
        UNet2DConditionModel,
        _maybe_align_image,
        bald_head,
    ) = _import_inference_bits()

    # Disable StyleGAN2 custom CUDA ops to avoid JIT compiling (needs ninja/NVCC).
    # On ZeroGPU, prefer the pure-PyTorch reference implementations so that
    # extension compilation cannot fail at runtime.
    try:
        from HairMapper.styleGAN2_ada_model.stylegan2_ada.torch_utils.ops import bias_act as _bias_act
        _bias_act.USING_CUDA_TO_SPEED_UP = False
        try:
            from HairMapper.styleGAN2_ada_model.stylegan2_ada.torch_utils.ops import upfirdn2d as _upfirdn2d
            if hasattr(_upfirdn2d, "USING_CUDA_TO_SPEED_UP"):
                _upfirdn2d.USING_CUDA_TO_SPEED_UP = False
        except Exception:
            pass
        try:
            from HairMapper.styleGAN2_ada_model.stylegan2_ada.torch_utils.ops import filtered_lrelu as _fl
            if hasattr(_fl, "USING_CUDA_TO_SPEED_UP"):
                _fl.USING_CUDA_TO_SPEED_UP = False
        except Exception:
            pass
    except Exception:
        pass

    os.makedirs("gradio_inputs", exist_ok=True)
    os.makedirs("gradio_outputs", exist_ok=True)
    id_path = "gradio_inputs/id.png"
    hair_path = "gradio_inputs/hair.png"
    id_image.save(id_path)
    hair_image.save(hair_path)

    # Align both inputs to the FFHQ crop.
    aligned_id = _maybe_align_image(id_path, output_size=1024, prefer_cuda=True)
    aligned_hair = _maybe_align_image(hair_path, output_size=1024, prefer_cuda=True)
    aligned_id_path = "gradio_outputs/aligned_id.png"
    aligned_hair_path = "gradio_outputs/aligned_hair.png"
    cv2.imwrite(aligned_id_path, cv2.cvtColor(aligned_id, cv2.COLOR_RGB2BGR))
    cv2.imwrite(aligned_hair_path, cv2.cvtColor(aligned_hair, cv2.COLOR_RGB2BGR))

    # Balding: strip the original hair from the identity image (in place).
    bald_id_path = "gradio_outputs/bald_id.png"
    cv2.imwrite(bald_id_path, cv2.cvtColor(aligned_id, cv2.COLOR_RGB2BGR))
    bald_head(bald_id_path, bald_id_path)

    # Resolve the trained model directory.
    trained_model_dir = os.path.abspath("trained_model") if os.path.isdir("trained_model") else None
    if trained_model_dir is None and os.path.isdir("pretrain"):
        trained_model_dir = os.path.abspath("pretrain")
    if trained_model_dir is None:
        raise RuntimeError("Missing trained model weights. Provide TRAINED_MODEL_REPO or include ./pretrain.")

    class Args:
        pretrained_model_name_or_path = SD15_PATH or os.path.abspath("stable-diffusion-v1-5/stable-diffusion-v1-5")
        model_path = trained_model_dir
        image_encoder = "openai/clip-vit-large-patch14"
        controlnet_model_name_or_path = None
        revision = None
        output_dir = "gradio_outputs"
        seed = 42
        num_validation_images = 1
        validation_ids = [aligned_id_path]
        validation_hairs = [aligned_hair_path]
        use_fp16 = False
        align_before_infer = True
        align_size = 1024

    args = Args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger = logging.getLogger(__name__)

    # Load tokenizer/encoders/VAE.
    tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision)
    image_encoder = CLIPVisionModelWithProjection.from_pretrained(args.image_encoder, revision=args.revision).to(device)
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision).to(device, dtype=torch.float32)

    from omegaconf import OmegaConf
    infer_config = OmegaConf.load("./configs/inference/inference_v2.yaml")

    # UNet2D with an 8-channel conv_in: reuse the pretrained 4-channel weights
    # and zero-initialize the 4 extra conditioning channels.
    unet2 = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, torch_dtype=torch.float32
    ).to(device)
    conv_in_8 = torch.nn.Conv2d(8, unet2.conv_in.out_channels, kernel_size=unet2.conv_in.kernel_size, padding=unet2.conv_in.padding)
    conv_in_8.requires_grad_(False)
    unet2.conv_in.requires_grad_(False)
    torch.nn.init.zeros_(conv_in_8.weight)
    conv_in_8.weight[:, :4, :, :].copy_(unet2.conv_in.weight)
    conv_in_8.bias.copy_(unet2.conv_in.bias)
    unet2.conv_in = conv_in_8
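    # After the swap, channels 0-3 of the new conv_in carry the pretrained SD1.5
    # latent weights and channels 4-7 are zero-initialized, so the extra
    # conditioning latents contribute nothing until trained weights are loaded.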

    controlnet = ControlNetModel.from_unet(unet2).to(device)
    state_dict2 = torch.load(os.path.join(args.model_path, "pytorch_model.bin"), map_location="cpu")
    controlnet.load_state_dict(state_dict2, strict=False)

    # Motion module checkpoint for the 3D (video) UNet.
    prefix = "motion_module"
    ckpt_num = "4140000"
    save_path = os.path.join(args.model_path, f"{prefix}-{ckpt_num}.pth")
    denoising_unet = UNet3DConditionModel.from_pretrained_2d(
        args.pretrained_model_name_or_path,
        save_path,
        subfolder="unet",
        unet_additional_kwargs=infer_config.unet_additional_kwargs,
    ).to(device)

    cc_projection = CCProjection().to(device)
    state_dict3 = torch.load(os.path.join(args.model_path, "pytorch_model_1.bin"), map_location="cpu")
    cc_projection.load_state_dict(state_dict3, strict=False)

    from ref_encoder.reference_unet import ref_unet
    Hair_Encoder = ref_unet.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="unet",
        revision=args.revision,
        low_cpu_mem_usage=False,
        device_map=None,
        ignore_mismatched_sizes=True,
    ).to(device)
    state_dict4 = torch.load(os.path.join(args.model_path, "pytorch_model_2.bin"), map_location="cpu")
    Hair_Encoder.load_state_dict(state_dict4, strict=False)

    # Run inference.
    log_validation(
        vae, tokenizer, image_encoder, denoising_unet, args, device, logger, cc_projection, controlnet, Hair_Encoder
    )

    output_video = os.path.join(args.output_dir, "validation", "generated_video_0.mp4")

    # Extract frames for the slider preview.
    frames_dir = os.path.join(args.output_dir, "frames", uuid.uuid4().hex)
    os.makedirs(frames_dir, exist_ok=True)
    cap = cv2.VideoCapture(output_video)
    frames_list = []
    idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        fp = os.path.join(frames_dir, f"{idx:03d}.png")
        cv2.imwrite(fp, frame)
        frames_list.append(fp)
        idx += 1
    cap.release()

    max_frames = len(frames_list) if frames_list else 1
    first_frame = frames_list[0] if frames_list else None
    return (
        aligned_id_path,
        aligned_hair_path,
        bald_id_path,
        output_video,
        frames_list,
        gr.update(minimum=1, maximum=max_frames, value=1, step=1),
        first_frame,
    )


# -----------------------------------------------------------------------------
# UI (Blocks)
# -----------------------------------------------------------------------------
CSS = f"""
html, body {{ height: 100%; margin: 0; padding: 0; }}
.gradio-container {{
  width: 100% !important;
  height: 100% !important;
  margin: 0 !important;
  padding: 0 !important;
  background-image: url("data:image/png;base64,{_b64_bg}");
  background-size: cover;
  background-position: center;
  background-attachment: fixed;
}}
#title-card {{
  background: rgba(255, 255, 255, 0.8);
  border-radius: 12px;
  padding: 16px 24px;
  box-shadow: 0 2px 8px rgba(0,0,0,0.15);
  margin-bottom: 20px;
}}
#title-card h2 {{ text-align: center; margin: 4px 0 12px 0; font-size: 28px; }}
#title-card p {{ text-align: center; font-size: 16px; color: #374151; }}
.out-card {{ border: 1px solid #e5e7eb; border-radius: 10px; padding: 10px; background: rgba(255,255,255,0.85); }}
.two-col {{ display: grid !important; grid-template-columns: 360px minmax(680px, 1fr); gap: 16px }}
.left-pane {{ min-width: 360px }}
.right-pane {{ min-width: 680px }}
.tabs {{
  background: rgba(255,255,255,0.88);
  border-radius: 12px;
  box-shadow: 0 8px 24px rgba(0,0,0,0.08);
  padding: 8px;
  border: 1px solid #e5e7eb;
}}
.tab-nav {{
  display: flex;
  gap: 8px;
  margin-bottom: 8px;
  background: transparent;
  border-bottom: 1px solid #e5e7eb;
  padding-bottom: 6px;
}}
.tabitem {{ background: rgba(255,255,255,0.88); border-radius: 10px; padding: 8px; }}
#hair_gallery_wrap {{
  height: 260px !important;
  overflow-y: scroll !important;
  overflow-x: auto !important;
}}
#hair_gallery_wrap .grid, #hair_gallery_wrap .wrap {{
  height: 100% !important;
  overflow-y: scroll !important;
}}
#hair_gallery {{ height: 100% !important; }}
"""
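# CSS above is an f-string so the base64 background can be inlined; literal CSS
# braces are therefore doubled ({{ }}) and only {_b64_bg} is interpolated.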

with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo", neutral_hue="slate"), css=CSS) as demo:
    with gr.Group(elem_id="title-card"):
        gr.Markdown(
            """
## StableHairV2: Multi-View Hairstyle Transfer

Upload an identity image and a hairstyle reference image; the pipeline automatically runs alignment → balding → video generation.
"""
        )

    with gr.Row(elem_classes=["two-col"]):
        with gr.Column(scale=5, min_width=260, elem_classes=["left-pane"]):
            id_input = gr.Image(type="pil", label="Identity image", height=200)
            hair_input = gr.Image(type="pil", label="Hairstyle reference", height=200)
            with gr.Row():
                run_btn = gr.Button("Generate", variant="primary")
                clear_btn = gr.Button("Clear")

            def _list_imgs(dir_path: str):
                exts = (".png", ".jpg", ".jpeg", ".webp")
                try:
                    return [
                        os.path.join(dir_path, f)
                        for f in sorted(os.listdir(dir_path))
                        if f.lower().endswith(exts)
                    ]
                except Exception:
                    return []

            hair_list = _list_imgs("hair_resposity")
            with gr.Accordion("Hairstyle library (click a style to auto-fill)", open=True):
                with gr.Group(elem_id="hair_gallery_wrap"):
                    gallery = gr.Gallery(
                        value=hair_list,
                        columns=4,
                        rows=2,
                        allow_preview=True,
                        label="Hairstyle library",
                        elem_id="hair_gallery",
                    )

            def _pick_hair(evt: gr.SelectData):
                i = evt.index if hasattr(evt, "index") else 0
                i = 0 if i is None else int(i)
                if 0 <= i < len(hair_list):
                    return gr.update(value=hair_list[i])
                return gr.update()

            gallery.select(_pick_hair, inputs=None, outputs=hair_input)

        with gr.Column(scale=7, min_width=520, elem_classes=["right-pane"]):
            with gr.Tabs():
                with gr.TabItem("Generated video"):
                    with gr.Group(elem_classes=["out-card"]):
                        video_out = gr.Video(label="Generated video", height=340)
                        with gr.Row():
                            frame_slider = gr.Slider(1, 21, value=1, step=1, label="Multi-view preview (drag to browse frames)")
                        frame_preview = gr.Image(type="filepath", label="Preview frame", height=260)
                        frames_state = gr.State([])
                with gr.TabItem("Alignment results"):
                    with gr.Group(elem_classes=["out-card"]):
                        with gr.Row():
                            aligned_id_out = gr.Image(type="filepath", label="Aligned identity image", height=240)
                            aligned_hair_out = gr.Image(type="filepath", label="Aligned hairstyle image", height=240)
                with gr.TabItem("Balding result"):
                    with gr.Group(elem_classes=["out-card"]):
                        bald_id_out = gr.Image(type="filepath", label="Bald identity image", height=260)

    run_btn.click(
        fn=inference,
        inputs=[id_input, hair_input],
        outputs=[aligned_id_out, aligned_hair_out, bald_id_out, video_out, frames_state, frame_slider, frame_preview],
    )

    def _on_slide(frames, idx):
        if not frames:
            return gr.update()
        i = int(idx) - 1
        i = max(0, min(i, len(frames) - 1))
        return gr.update(value=frames[i])

    frame_slider.change(_on_slide, inputs=[frames_state, frame_slider], outputs=frame_preview)

    def _clear():
        return None, None, None, None, None

    clear_btn.click(_clear, None, [id_input, hair_input, aligned_id_out, aligned_hair_out, bald_id_out])


if __name__ == "__main__":
    demo.queue().launch(server_name="0.0.0.0", server_port=7860)