Spaces: Running on Zero
try fp16
app.py CHANGED
@@ -24,8 +24,8 @@ def hf_hub_download_local(repo_id, filename, local_dir, **kwargs):
 print("Downloading models from Hugging Face Hub...")
 text_encoder_repo = hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", local_dir="models/text_encoders")
 print(text_encoder_repo)
-hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.
-hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.
+hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp16.safetensors", local_dir="models/unet")
+hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp16.safetensors", local_dir="models/unet")
 hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/vae/wan_2.1_vae.safetensors", local_dir="models/vae")
 hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/clip_vision/clip_vision_h.safetensors", local_dir="models/clip_vision")
 hf_hub_download_local(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", local_dir="models/loras")
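The hunk header above shows the signature of the download wrapper, hf_hub_download_local(repo_id, filename, local_dir, **kwargs), but not its body. A minimal sketch of what it plausibly does, assuming it forwards to huggingface_hub.hf_hub_download and then flattens the repo's nested split_files/... layout so the loaders below can reference bare filenames; the flattening step is an assumption, not shown in this diff:

import os
import shutil
from huggingface_hub import hf_hub_download

def hf_hub_download_local(repo_id, filename, local_dir, **kwargs):
    # Download into local_dir (hf_hub_download preserves the nested
    # "split_files/..." subfolders from the repo).
    path = hf_hub_download(repo_id=repo_id, filename=filename,
                           local_dir=local_dir, **kwargs)
    # Assumed: move the file up to local_dir so ComfyUI loaders can
    # reference it by bare filename (e.g. "wan_2.1_vae.safetensors").
    flat_path = os.path.join(local_dir, os.path.basename(filename))
    if os.path.abspath(path) != os.path.abspath(flat_path):
        shutil.move(path, flat_path)
    return flat_path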
@@ -78,8 +78,8 @@ createvideo = nodes.NODE_CLASS_MAPPINGS["CreateVideo"]()
 savevideo = nodes.NODE_CLASS_MAPPINGS["SaveVideo"]()
 
 cliploader_38 = cliploader.load_clip(clip_name="umt5_xxl_fp8_e4m3fn_scaled.safetensors", type="wan", device="cpu")
-unetloader_37_low_noise = unetloader.load_unet(unet_name="wan2.
-unetloader_91_high_noise = unetloader.load_unet(unet_name="wan2.
+unetloader_37_low_noise = unetloader.load_unet(unet_name="wan2.2_i2v_low_noise_14B_fp16.safetensors", weight_dtype="default")
+unetloader_91_high_noise = unetloader.load_unet(unet_name="wan2.2_i2v_high_noise_14B_fp16.safetensors", weight_dtype="default")
 vaeloader_39 = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors")
 clipvisionloader_49 = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors")
 
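For readers unfamiliar with driving ComfyUI from plain Python: each name like cliploader or unetloader is an instance of a node class pulled from nodes.NODE_CLASS_MAPPINGS, and every node method returns a tuple, which is why the code further down indexes loader[0]. A short sketch of the pattern, assuming ComfyUI is on sys.path (UNETLoader is ComfyUI's standard UNet loader node):

import nodes  # ComfyUI's node registry

# Instantiate the node class, then call its function like a plain method.
unetloader = nodes.NODE_CLASS_MAPPINGS["UNETLoader"]()
result = unetloader.load_unet(unet_name="wan2.2_i2v_low_noise_14B_fp16.safetensors",
                              weight_dtype="default")
model_patcher = result[0]  # node outputs are tuples; first slot holds the model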
@@ -92,6 +92,7 @@ pathchsageattentionkj_96_high = pathchsageattentionkj.patch(sage_attention="auto
 
 model_loaders = [cliploader_38, unetloader_37_low_noise, unetloader_91_high_noise, vaeloader_39, clipvisionloader_49, loraloadermodelonly_94_high, loraloadermodelonly_95_low]
 valid_models = [getattr(loader[0], 'patcher', loader[0]) for loader in model_loaders if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)]
+model_management.load_models_gpu(valid_models)
 
 # --- App Logic ---
 def calculate_dimensions(width, height):
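The one added line here, model_management.load_models_gpu(valid_models), pushes the weights onto the GPU once at startup rather than lazily on the first generation; load_models_gpu lives in ComfyUI's model_management module and expects ModelPatcher objects. The dense comprehension on the previous line normalizes the node outputs into exactly that. An equivalent, commented sketch of what it does per loader result:

def to_gpu_model(loader_output):
    obj = loader_output[0]                      # node outputs are tuples
    if isinstance(obj, dict):                   # raw state dicts can't be pre-loaded
        return None
    if isinstance(getattr(obj, 'patcher', None), dict):
        return None                             # defensive: dict-valued .patcher
    # CLIP/VAE wrappers expose their ModelPatcher as .patcher; UNet loader
    # results already are patchers, so fall back to the object itself.
    return getattr(obj, 'patcher', obj)

valid_models = [m for m in map(to_gpu_model, model_loaders) if m is not None]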
@@ -107,7 +108,7 @@ def generate_video(prompt, first_image_path, last_image_path, duration_seconds):
     os.makedirs(temp_dir, exist_ok=True)
 
     with torch.inference_mode():
-
+
         # --- Python Image Preprocessing using Pillow ---
         print("Preprocessing images with Pillow...")
        with Image.open(first_image_path) as img:
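The whitespace-only change inside torch.inference_mode() is cosmetic; the substantive work starts at the Pillow preprocessing block, most of which lies outside this hunk. A hypothetical sketch of how that block likely continues, assuming calculate_dimensions(width, height) (defined earlier in app.py, body not shown here) returns model-friendly target dimensions, and with first_image_path coming from generate_video's arguments:

from PIL import Image

with Image.open(first_image_path) as img:
    img = img.convert("RGB")                         # normalize palette/alpha modes
    width, height = calculate_dimensions(*img.size)  # assumed to snap to valid sizes
    first_frame = img.resize((width, height), Image.Resampling.LANCZOS)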