multimodalart committed on
Commit bfc6564 · verified · 1 Parent(s): f9d266f

Update app.py

Files changed (1):
  1. app.py +82 -191
app.py CHANGED
@@ -1,119 +1,81 @@
  import os
- import random
  import sys
  from typing import Sequence, Mapping, Any, Union
- import spaces
  import torch
  import gradio as gr
  from huggingface_hub import hf_hub_download
  from comfy import model_management
- import comfy_extras.nodes_model_advanced
- import comfy_extras.nodes_custom_sampler
  from PIL import Image

- # --- Helper Functions from original script ---

  def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
      try:
          return obj[index]
      except KeyError:
          return obj["result"][index]

- def find_path(name: str, path: str = None) -> str:
-     if path is None:
-         path = os.getcwd()
-     if name in os.listdir(path):
-         path_name = os.path.join(path, name)
-         print(f"{name} found: {path_name}")
-         return path_name
-     parent_directory = os.path.dirname(path)
-     if parent_directory == path:
-         return None
-     return find_path(name, parent_directory)
-
- def add_comfyui_directory_to_sys_path() -> None:
-     comfyui_path = os.getcwd()
-     if "main.py" in os.listdir(comfyui_path):
-         if comfyui_path not in sys.path:
-             sys.path.append(comfyui_path)
-             print(f"'{comfyui_path}' added to sys.path")
-
- def add_extra_model_paths() -> None:
-     try:
-         from main import load_extra_path_config
-     except (ImportError, ModuleNotFoundError):
-         print("Could not import from main.py, trying utils...")
-         try:
-             from utils.extra_config import load_extra_path_config
-         except (ImportError, ModuleNotFoundError):
-             print("Could not find load_extra_path_config function.")
-             return
-     extra_model_paths = find_path("extra_model_paths.yaml")
-     if extra_model_paths:
-         load_extra_path_config(extra_model_paths)
-
- def import_custom_nodes() -> None:
-     import asyncio
-     import execution
-     from nodes import init_extra_nodes
-     import server
-     loop = asyncio.new_event_loop()
-     asyncio.set_event_loop(loop)
-     server_instance = server.PromptServer(loop)
-     execution.PromptQueue(server_instance)
-     init_extra_nodes()
-
- # --- Setup and Model Downloads ---
-
- add_comfyui_directory_to_sys_path()
- add_extra_model_paths()
- import_custom_nodes()
-
- from nodes import NODE_CLASS_MAPPINGS
-
- print("Downlading models from Hugging Face Hub...")
- # Text Encoder
- hf_hub_download(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", local_dir="models/text_encoders")
- # UNETs
- hf_hub_download(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
- hf_hub_download(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
- # VAE
  hf_hub_download(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/vae/wan_2.1_vae.safetensors", local_dir="models/vae")
- # CLIP Vision
  hf_hub_download(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/clip_vision/clip_vision_h.safetensors", local_dir="models/clip_vision")
- # LoRAs
  hf_hub_download(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", local_dir="models/loras")
  hf_hub_download(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", local_dir="models/loras")
  print("Downloads complete.")

- # --- ZeroGPU: Pre-load models and instantiate nodes globally ---

- # Instantiate Nodes
- cliploader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
- cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
- unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
- vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
- clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
- loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
- clipvisionencode = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
- loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
- modelsamplingsd3 = NODE_CLASS_MAPPINGS["ModelSamplingSD3"]()
- pathchsageattentionkj = NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
- wanfirstlastframetovideo = NODE_CLASS_MAPPINGS["WanFirstLastFrameToVideo"]()
- ksampleradvanced = NODE_CLASS_MAPPINGS["KSamplerAdvanced"]()
- vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
- createvideo = NODE_CLASS_MAPPINGS["CreateVideo"]()
- savevideo = NODE_CLASS_MAPPINGS["SaveVideo"]()
- imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]() # For dynamic resizing

- # Load Models
  cliploader_38 = cliploader.load_clip(clip_name="umt5_xxl_fp8_e4m3fn_scaled.safetensors", type="wan", device="cpu")
  unetloader_37_low_noise = unetloader.load_unet(unet_name="wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", weight_dtype="default")
  unetloader_91_high_noise = unetloader.load_unet(unet_name="wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", weight_dtype="default")
  vaeloader_39 = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors")
  clipvisionloader_49 = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors")

- # Apply LoRAs and Patches
  loraloadermodelonly_94_high = loraloadermodelonly.load_lora_model_only(lora_name="Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", strength_model=0.8, model=get_value_at_index(unetloader_91_high_noise, 0))
  loraloadermodelonly_95_low = loraloadermodelonly.load_lora_model_only(lora_name="Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", strength_model=0.8, model=get_value_at_index(unetloader_37_low_noise, 0))
  modelsamplingsd3_93_low = modelsamplingsd3.patch(shift=8, model=get_value_at_index(loraloadermodelonly_95_low, 0))
@@ -121,141 +83,70 @@ pathchsageattentionkj_98_low = pathchsageattentionkj.patch(sage_attention="auto"
  modelsamplingsd3_79_high = modelsamplingsd3.patch(shift=8, model=get_value_at_index(loraloadermodelonly_94_high, 0))
  pathchsageattentionkj_96_high = pathchsageattentionkj.patch(sage_attention="auto", model=get_value_at_index(modelsamplingsd3_79_high, 0))

- # Pre-load models to GPU
  model_loaders = [cliploader_38, unetloader_37_low_noise, unetloader_91_high_noise, vaeloader_39, clipvisionloader_49, loraloadermodelonly_94_high, loraloadermodelonly_95_low]
  valid_models = [getattr(loader[0], 'patcher', loader[0]) for loader in model_loaders if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)]
  model_management.load_models_gpu(valid_models)

- # --- Custom Logic for this App ---
-
  def calculate_dimensions(image_path):
-     with Image.open(image_path) as img:
-         width, height = img.size
-
-     if width == height:
-         return 480, 480
-
-     if width > height:
-         new_width = 832
-         new_height = int(height * (832 / width))
-     else:
-         new_height = 832
-         new_width = int(width * (832 / height))
-
-     # Ensure dimensions are multiples of 16
-     new_width = (new_width // 16) * 16
-     new_height = (new_height // 16) * 16
-
-     return new_width, new_height
-
- # --- Main Generation Function ---

  @spaces.GPU(duration=120)
- def generate_video(prompt, first_image_path, last_image_path):
-     # This function now only handles per-request logic
      with torch.inference_mode():
-         # Calculate target dimensions based on the first image
          target_width, target_height = calculate_dimensions(first_image_path)

-         # 1. Load and resize images
-         # Since LoadImage returns a tensor, we pass it to the resize node
          loaded_first_image = loadimage.load_image(image=first_image_path)
-         resized_first_image = imageresize.execute(
-             width=target_width, height=target_height, interpolation="bicubic",
-             method="stretch", condition="always", multiple_of=1,
-             image=get_value_at_index(loaded_first_image, 0)
-         )
-
          loaded_last_image = loadimage.load_image(image=last_image_path)
-         resized_last_image = imageresize.execute(
-             width=target_width, height=target_height, interpolation="bicubic",
-             method="stretch", condition="always", multiple_of=1,
-             image=get_value_at_index(loaded_last_image, 0)
-         )

-         # 2. Encode text and images
          cliptextencode_6 = cliptextencode.encode(text=prompt, clip=get_value_at_index(cliploader_38, 0))
-         cliptextencode_7_negative = cliptextencode.encode(
-             text="low quality, worst quality, jpeg artifacts, ugly, deformed, blurry",
-             clip=get_value_at_index(cliploader_38, 0),
-         )
          clipvisionencode_51 = clipvisionencode.encode(crop="none", clip_vision=get_value_at_index(clipvisionloader_49, 0), image=get_value_at_index(resized_first_image, 0))
          clipvisionencode_87 = clipvisionencode.encode(crop="none", clip_vision=get_value_at_index(clipvisionloader_49, 0), image=get_value_at_index(resized_last_image, 0))

-         # 3. Prepare latents for video generation
-         wanfirstlastframetovideo_83 = wanfirstlastframetovideo.EXECUTE_NORMALIZED(
-             width=target_width, height=target_height, length=33, batch_size=1,
-             positive=get_value_at_index(cliptextencode_6, 0),
-             negative=get_value_at_index(cliptextencode_7_negative, 0),
-             vae=get_value_at_index(vaeloader_39, 0),
-             clip_vision_start_image=get_value_at_index(clipvisionencode_51, 0),
-             clip_vision_end_image=get_value_at_index(clipvisionencode_87, 0),
-             start_image=get_value_at_index(resized_first_image, 0),
-             end_image=get_value_at_index(resized_last_image, 0),
-         )

-         # 4. KSampler pipeline
-         ksampleradvanced_101 = ksampleradvanced.sample(
-             add_noise="enable", noise_seed=random.randint(1, 2**64), steps=8, cfg=1,
-             sampler_name="euler", scheduler="simple", start_at_step=0, end_at_step=4,
-             return_with_leftover_noise="enable", model=get_value_at_index(pathchsageattentionkj_96_high, 0),
-             positive=get_value_at_index(wanfirstlastframetovideo_83, 0),
-             negative=get_value_at_index(wanfirstlastframetovideo_83, 1),
-             latent_image=get_value_at_index(wanfirstlastframetovideo_83, 2),
-         )
-         ksampleradvanced_102 = ksampleradvanced.sample(
-             add_noise="disable", noise_seed=random.randint(1, 2**64), steps=8, cfg=1,
-             sampler_name="euler", scheduler="simple", start_at_step=4, end_at_step=10000,
-             return_with_leftover_noise="disable", model=get_value_at_index(pathchsageattentionkj_98_low, 0),
-             positive=get_value_at_index(wanfirstlastframetovideo_83, 0),
-             negative=get_value_at_index(wanfirstlastframetovideo_83, 1),
-             latent_image=get_value_at_index(ksampleradvanced_101, 0),
-         )

-         # 5. Decode and save video
          vaedecode_8 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_102, 0), vae=get_value_at_index(vaeloader_39, 0))
          createvideo_104 = createvideo.create_video(fps=16, images=get_value_at_index(vaedecode_8, 0))
          savevideo_103 = savevideo.save_video(filename_prefix="ComfyUI_Video", format="mp4", codec="libx264", video=get_value_at_index(createvideo_104, 0))

-         # Return the path to the saved video
-         video_filename = savevideo_103['ui']['videos'][0]['filename']
-         return f"output/{video_filename}"
-
- # --- Gradio Interface ---
-
  with gr.Blocks() as app:
      gr.Markdown("# Wan 2.2 First/Last Frame to Video")
-     gr.Markdown("Provide a starting image, an ending image, and a text prompt to generate a video transitioning between them.")
-
      with gr.Row():
          with gr.Column(scale=1):
-             prompt_input = gr.Textbox(label="Prompt", value="the guy turns")
-             first_image = gr.Image(label="First Frame", type="filepath")
-             last_image = gr.Image(label="Last Frame", type="filepath")
              generate_btn = gr.Button("Generate Video")
          with gr.Column(scale=2):
              output_video = gr.Video(label="Generated Video")
-
-     generate_btn.click(
-         fn=generate_video,
-         inputs=[prompt_input, first_image, last_image],
-         outputs=[output_video]
-     )
-
-     gr.Examples(
-         examples=[
-             ["a beautiful woman, cinematic", "examples/start.png", "examples/end.png"]
-         ],
-         inputs=[prompt_input, first_image, last_image]
-     )

  if __name__ == "__main__":
-     # Create example images if they don't exist
-     if not os.path.exists("examples"):
-         os.makedirs("examples")
-     if not os.path.exists("examples/start.png"):
-         Image.new('RGB', (512, 512), color = 'red').save('examples/start.png')
-     if not os.path.exists("examples/end.png"):
-         Image.new('RGB', (512, 512), color = 'blue').save('examples/end.png')
-
      app.launch()
 
  import os
+
+ if os.getcwd() != '/home/user/app':
+     os.chdir('/home/user/app')
+
  import sys
+ import subprocess
+ import asyncio
  from typing import Sequence, Mapping, Any, Union
+
+ print("Importing ComfyUI's main.py for setup...")
+ import main
+ print("ComfyUI main imported.")
+
+
  import torch
  import gradio as gr
  from huggingface_hub import hf_hub_download
  from comfy import model_management
+ import spaces
  from PIL import Image
+ import random
+ import nodes  # Import nodes after main has set everything up
+

+ # --- Manually trigger the node initialization ---
+ # This step is normally done inside main.start_comfyui(), but we do it here.
+ # It loads all built-in, extra, and custom nodes into the NODE_CLASS_MAPPINGS.
+ print("Initializing ComfyUI nodes...")
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ loop.run_until_complete(nodes.init_extra_nodes())
+ print("Nodes initialized.")

+ # --- Helper function from the original script ---
  def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
      try:
          return obj[index]
      except KeyError:
          return obj["result"][index]

+ # --- Model Downloads ---
+ print("Downloading models from Hugging Face Hub...")
+ hf_hub_download(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", local_dir="models/text_encoders")
+ hf_hub_download(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", local_dir="models/diffusion_models")
+ hf_hub_download(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", local_dir="models/diffusion_models")
  hf_hub_download(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/vae/wan_2.1_vae.safetensors", local_dir="models/vae")
  hf_hub_download(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/clip_vision/clip_vision_h.safetensors", local_dir="models/clip_vision")
  hf_hub_download(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", local_dir="models/loras")
  hf_hub_download(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", local_dir="models/loras")
  print("Downloads complete.")

+ # --- ZeroGPU: Pre-load models and instantiate nodes globally ---
+ # This part will now work because NODE_CLASS_MAPPINGS is correctly populated.
+ cliploader = nodes.NODE_CLASS_MAPPINGS["CLIPLoader"]()
+ cliptextencode = nodes.NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
+ unetloader = nodes.NODE_CLASS_MAPPINGS["UNETLoader"]()
+ vaeloader = nodes.NODE_CLASS_MAPPINGS["VAELoader"]()
+ clipvisionloader = nodes.NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
+ loadimage = nodes.NODE_CLASS_MAPPINGS["LoadImage"]()
+ clipvisionencode = nodes.NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
+ loraloadermodelonly = nodes.NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
+ modelsamplingsd3 = nodes.NODE_CLASS_MAPPINGS["ModelSamplingSD3"]()
+ pathchsageattentionkj = nodes.NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
+ wanfirstlastframetovideo = nodes.NODE_CLASS_MAPPINGS["WanFirstLastFrameToVideo"]()
+ ksampleradvanced = nodes.NODE_CLASS_MAPPINGS["KSamplerAdvanced"]()
+ vaedecode = nodes.NODE_CLASS_MAPPINGS["VAEDecode"]()
+ createvideo = nodes.NODE_CLASS_MAPPINGS["CreateVideo"]()
+ savevideo = nodes.NODE_CLASS_MAPPINGS["SaveVideo"]()
+ imageresize = nodes.NODE_CLASS_MAPPINGS["ImageResize+"]()

  cliploader_38 = cliploader.load_clip(clip_name="umt5_xxl_fp8_e4m3fn_scaled.safetensors", type="wan", device="cpu")
  unetloader_37_low_noise = unetloader.load_unet(unet_name="wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", weight_dtype="default")
  unetloader_91_high_noise = unetloader.load_unet(unet_name="wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", weight_dtype="default")
  vaeloader_39 = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors")
  clipvisionloader_49 = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors")

  loraloadermodelonly_94_high = loraloadermodelonly.load_lora_model_only(lora_name="Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", strength_model=0.8, model=get_value_at_index(unetloader_91_high_noise, 0))
  loraloadermodelonly_95_low = loraloadermodelonly.load_lora_model_only(lora_name="Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", strength_model=0.8, model=get_value_at_index(unetloader_37_low_noise, 0))
  modelsamplingsd3_93_low = modelsamplingsd3.patch(shift=8, model=get_value_at_index(loraloadermodelonly_95_low, 0))
  pathchsageattentionkj_98_low = pathchsageattentionkj.patch(sage_attention="auto", model=get_value_at_index(modelsamplingsd3_93_low, 0))
  modelsamplingsd3_79_high = modelsamplingsd3.patch(shift=8, model=get_value_at_index(loraloadermodelonly_94_high, 0))
  pathchsageattentionkj_96_high = pathchsageattentionkj.patch(sage_attention="auto", model=get_value_at_index(modelsamplingsd3_79_high, 0))

  model_loaders = [cliploader_38, unetloader_37_low_noise, unetloader_91_high_noise, vaeloader_39, clipvisionloader_49, loraloadermodelonly_94_high, loraloadermodelonly_95_low]
  valid_models = [getattr(loader[0], 'patcher', loader[0]) for loader in model_loaders if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)]
  model_management.load_models_gpu(valid_models)

+ # --- App Logic ---
  def calculate_dimensions(image_path):
+     with Image.open(image_path) as img: width, height = img.size
+     if width == height: return 480, 480
+     if width > height: new_width, new_height = 832, int(height * (832 / width))
+     else: new_height, new_width = 832, int(width * (832 / height))
+     return (new_width // 16) * 16, (new_height // 16) * 16

  @spaces.GPU(duration=120)
+ def generate_video(prompt, first_image_path, last_image_path, duration_seconds):
      with torch.inference_mode():
+         FPS, MAX_FRAMES = 16, 81
+         length_in_frames = max(1, min(int(duration_seconds * FPS), MAX_FRAMES))
+         print(f"Requested duration: {duration_seconds}s. Calculated frames: {length_in_frames}")
          target_width, target_height = calculate_dimensions(first_image_path)

          loaded_first_image = loadimage.load_image(image=first_image_path)
+         resized_first_image = imageresize.execute(width=target_width, height=target_height, interpolation="bicubic", method="stretch", image=get_value_at_index(loaded_first_image, 0))
          loaded_last_image = loadimage.load_image(image=last_image_path)
+         resized_last_image = imageresize.execute(width=target_width, height=target_height, interpolation="bicubic", method="stretch", image=get_value_at_index(loaded_last_image, 0))

          cliptextencode_6 = cliptextencode.encode(text=prompt, clip=get_value_at_index(cliploader_38, 0))
+         cliptextencode_7_negative = cliptextencode.encode(text="low quality, worst quality, jpeg artifacts, ugly, deformed, blurry", clip=get_value_at_index(cliploader_38, 0))
          clipvisionencode_51 = clipvisionencode.encode(crop="none", clip_vision=get_value_at_index(clipvisionloader_49, 0), image=get_value_at_index(resized_first_image, 0))
          clipvisionencode_87 = clipvisionencode.encode(crop="none", clip_vision=get_value_at_index(clipvisionloader_49, 0), image=get_value_at_index(resized_last_image, 0))

+         wanfirstlastframetovideo_83 = wanfirstlastframetovideo.EXECUTE_NORMALIZED(width=target_width, height=target_height, length=length_in_frames, batch_size=1, positive=get_value_at_index(cliptextencode_6, 0), negative=get_value_at_index(cliptextencode_7_negative, 0), vae=get_value_at_index(vaeloader_39, 0), clip_vision_start_image=get_value_at_index(clipvisionencode_51, 0), clip_vision_end_image=get_value_at_index(clipvisionencode_87, 0), start_image=get_value_at_index(resized_first_image, 0), end_image=get_value_at_index(resized_last_image, 0))

+         ksampler_positive = get_value_at_index(wanfirstlastframetovideo_83, 0)
+         ksampler_negative = get_value_at_index(wanfirstlastframetovideo_83, 1)
+         ksampler_latent = get_value_at_index(wanfirstlastframetovideo_83, 2)
+
+         ksampleradvanced_101 = ksampleradvanced.sample(add_noise="enable", noise_seed=random.randint(1, 2**64), steps=8, cfg=1, sampler_name="euler", scheduler="simple", start_at_step=0, end_at_step=4, return_with_leftover_noise="enable", model=get_value_at_index(pathchsageattentionkj_96_high, 0), positive=ksampler_positive, negative=ksampler_negative, latent_image=ksampler_latent)
+         ksampleradvanced_102 = ksampleradvanced.sample(add_noise="disable", noise_seed=random.randint(1, 2**64), steps=8, cfg=1, sampler_name="euler", scheduler="simple", start_at_step=4, end_at_step=10000, return_with_leftover_noise="disable", model=get_value_at_index(pathchsageattentionkj_98_low, 0), positive=ksampler_positive, negative=ksampler_negative, latent_image=get_value_at_index(ksampleradvanced_101, 0))

          vaedecode_8 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_102, 0), vae=get_value_at_index(vaeloader_39, 0))
          createvideo_104 = createvideo.create_video(fps=16, images=get_value_at_index(vaedecode_8, 0))
          savevideo_103 = savevideo.save_video(filename_prefix="ComfyUI_Video", format="mp4", codec="libx264", video=get_value_at_index(createvideo_104, 0))
+
+         return f"output/{savevideo_103['ui']['videos'][0]['filename']}"

+ # --- Gradio Interface (no changes needed) ---
  with gr.Blocks() as app:
      gr.Markdown("# Wan 2.2 First/Last Frame to Video")
+     gr.Markdown("Provide a starting image, an ending image, a text prompt, and a desired duration to generate a video transitioning between them.")
      with gr.Row():
          with gr.Column(scale=1):
+             prompt_input = gr.Textbox(label="Prompt", value="a man dancing in the street, cinematic")
+             duration_slider = gr.Slider(minimum=1.0, maximum=5.0, value=2.0, step=0.1, label="Video Duration (seconds)")
+             with gr.Row():
+                 first_image = gr.Image(label="First Frame", type="filepath")
+                 last_image = gr.Image(label="Last Frame", type="filepath")
              generate_btn = gr.Button("Generate Video")
          with gr.Column(scale=2):
              output_video = gr.Video(label="Generated Video")
+     generate_btn.click(fn=generate_video, inputs=[prompt_input, first_image, last_image, duration_slider], outputs=[output_video])
+     gr.Examples(examples=[["a beautiful woman, cinematic", "examples/start.png", "examples/end.png", 2.5]], inputs=[prompt_input, first_image, last_image, duration_slider])

  if __name__ == "__main__":
      if not os.path.exists("examples"): os.makedirs("examples")
      if not os.path.exists("examples/start.png"): Image.new('RGB', (512, 512), color='red').save('examples/start.png')
      if not os.path.exists("examples/end.png"): Image.new('RGB', (512, 512), color='blue').save('examples/end.png')
      app.launch()
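
For reference, the per-request sizing logic introduced above reduces to two small calculations: the requested duration is converted to a frame count at 16 fps and capped at 81 frames, and the output resolution is derived from the first frame's aspect ratio and snapped down to multiples of 16. A minimal standalone sketch of the same arithmetic (the function names here are illustrative and are not part of app.py):

def frames_for_duration(duration_seconds: float, fps: int = 16, max_frames: int = 81) -> int:
    # Same clamp as generate_video: at least 1 frame, at most 81 frames.
    return max(1, min(int(duration_seconds * fps), max_frames))

def target_size(width: int, height: int) -> tuple[int, int]:
    # Same rule as calculate_dimensions: square inputs become 480x480,
    # otherwise the long side is scaled to 832 and both sides are
    # rounded down to multiples of 16.
    if width == height:
        return 480, 480
    if width > height:
        new_w, new_h = 832, int(height * (832 / width))
    else:
        new_h, new_w = 832, int(width * (832 / height))
    return (new_w // 16) * 16, (new_h // 16) * 16

# Example: a 3.0 s request on a 1920x1080 first frame -> 48 frames at 832x464.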