import torch
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from transformers import CLIPVisionModel
import gradio as gr
import tempfile
import os
import subprocess
from huggingface_hub import hf_hub_download
import numpy as np
from PIL import Image
import random
import warnings

warnings.filterwarnings("ignore", message=".*Attempting to use legacy OpenCV backend.*")
warnings.filterwarnings("ignore", message=".*num_frames - 1.*")
MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"

# Global variable to hold the pipeline. It's initialized to None.
pipe = None
def initialize_pipeline():
    """
    Initializes the model pipeline on the first request.
    This function is designed for serverless GPU environments like ZeroGPU.
    """
    global pipe
    # The 'pipe' global variable acts as a flag. If it's not None, we've already initialized.
    if pipe is None:
        print("First time setup: Initializing model pipeline...")
        gr.Info("Cold start: The first generation will take longer as the model is loaded.")
        if not torch.cuda.is_available():
            raise gr.Error("GPU not available. This application requires a GPU to run.")
        image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float16)
        vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float16)
        # All model loading happens here, when a GPU is guaranteed to be active.
        pipe = WanImageToVideoPipeline.from_pretrained(
            MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.float16
        )
        pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
        pipe.enable_model_cpu_offload()
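        # The FusionX LoRA (loaded under the adapter name "causvid_lora") is what makes the very low
        # step counts used by this UI practical; fusing the 0.75-weighted adapter bakes it into the
        # transformer so there is no per-step LoRA overhead.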
        try:
            causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
            print("✅ LoRA downloaded to:", causvid_path)
            pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
            pipe.set_adapters(["causvid_lora"], adapter_weights=[0.75])
            pipe.fuse_lora()
        except Exception as e:
            raise gr.Error(f"Error loading LoRA: {e}")
        print("✅ Pipeline initialized successfully.")

# --- Constants and Helper Functions ---
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE = 640, 1024
NEW_FORMULA_MAX_AREA = 640.0 * 1024.0
SLIDER_MIN_H, SLIDER_MAX_H = 128, 1024
SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL = 24, 8, 240

default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
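
# Derive output dimensions from the uploaded image: preserve its aspect ratio, target roughly the
# NEW_FORMULA_MAX_AREA pixel budget, snap both sides to multiples of MOD_VALUE, and clamp to the slider range.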
def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
                                  min_slider_h, max_slider_h, min_slider_w, max_slider_w,
                                  default_h, default_w):
    orig_w, orig_h = pil_image.size
    if orig_w <= 0 or orig_h <= 0:
        return default_h, default_w
    aspect_ratio = orig_h / orig_w
    calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
    calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
    calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
    calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
    new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
    new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
    return new_h, new_w

def handle_image_upload_for_dims_wan(uploaded_pil_image):
    if uploaded_pil_image is None:
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
    try:
        new_h, new_w = _calculate_new_dimensions_wan(
            uploaded_pil_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
            SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
            DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
        )
        return gr.update(value=new_h), gr.update(value=new_w)
    except Exception:
        gr.Warning("Error calculating new dimensions.")
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)

def export_video_with_ffmpeg(frames, output_path, fps=24):
    # Prefer imageio's ffmpeg writer so we can force H.264 + yuv420p (broad browser/player support);
    # fall back to diffusers' export_to_video if imageio isn't installed.
    try:
        import imageio
        writer = imageio.get_writer(output_path, fps=fps, codec='libx264',
                                    pixelformat='yuv420p', quality=8)
        for frame in frames:
            writer.append_data(np.array(frame))
        writer.close()
    except ImportError:
        export_to_video(frames, output_path, fps=fps)
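
# One end-to-end generation: lazy-initialize the pipeline, snap dimensions and frame count to
# model-friendly values, run the denoising loop under fp16 autocast, then encode the frames to MP4.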
def generate_video(input_image, prompt, height, width,
                   negative_prompt, duration_seconds,
                   guidance_scale, steps, seed, randomize_seed,
                   progress=gr.Progress(track_tqdm=True)):
    # --- LAZY LOADING TRIGGER ---
    # This will load the model on the first run, and do nothing on subsequent runs.
    initialize_pipeline()

    if input_image is None:
        raise gr.Error("Please upload an input image.")

    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
    raw_frames = int(round(duration_seconds * FIXED_FPS))
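    # Wan expects frame counts of the form 4k + 1 (its VAE works on 4-frame temporal blocks), so snap
    # the requested duration to that grid; clamping to MIN/MAX can step off it, which is why the
    # "num_frames - 1" warning is filtered at import time.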
    num_frames = ((raw_frames - 1) // 4) * 4 + 1
    num_frames = np.clip(num_frames, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)

    if num_frames > 120 and max(target_h, target_w) > 768:
        scale_factor = 768 / max(target_h, target_w)
        target_h = max(MOD_VALUE, int(target_h * scale_factor) // MOD_VALUE * MOD_VALUE)
        target_w = max(MOD_VALUE, int(target_w * scale_factor) // MOD_VALUE * MOD_VALUE)
        gr.Info(f"Reduced resolution to {target_w}x{target_h} for long video.")

    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = input_image.resize((target_w, target_h), Image.Resampling.LANCZOS)

    try:
        torch.cuda.empty_cache()
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16):
            output_frames_list = pipe(
                image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
                height=target_h, width=target_w, num_frames=num_frames,
                guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
                generator=torch.Generator(device="cuda").manual_seed(current_seed)
            ).frames[0]
    except torch.cuda.OutOfMemoryError:
        raise gr.Error("Out of GPU memory. Try reducing duration or resolution.")
    finally:
        torch.cuda.empty_cache()

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_video_with_ffmpeg(output_frames_list, video_path, fps=FIXED_FPS)
    return video_path, current_seed

# --- Gradio UI ---
with gr.Blocks() as demo:
    gr.Markdown("# Wan 2.1 I2V FusionX-LoRA (ZeroGPU Ready)")
    gr.Markdown("The first generation will be slow due to a 'cold start'. Subsequent generations will be much faster.")
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS, 1), step=0.1, value=2, label="Duration (seconds)")
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
                with gr.Row():
                    height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label="Height")
                    width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label="Width")
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
            gr.Markdown("### Tips:\n- Longer videos need more memory.\n- 4-8 steps is optimal.")
    input_image_component.upload(fn=handle_image_upload_for_dims_wan, inputs=input_image_component, outputs=[height_input, width_input])
    ui_inputs = [input_image_component, prompt_input, height_input, width_input, negative_prompt_input, duration_seconds_input, guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])

if __name__ == "__main__":
    # Launch unconditionally; the GPU check is deferred until the first generation request.
    demo.queue(max_size=3).launch()