Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -201,26 +201,12 @@ def generate_video(gallery_images, mode, prompt, height, width,
|
|
201 |
reference_images = None
|
202 |
elif mode == "Ref2V":
|
203 |
frames, mask = prepare_video_and_mask_Ref2V(height=target_h, width=target_w, num_frames=num_frames)
|
204 |
-
|
205 |
-
reference_images = [img.resize((target_w, target_h)) for img in gallery_images]
|
206 |
else: # mode == "Random2V"
|
207 |
-
# Calculate appropriate frame indices based on number of images and frames
|
208 |
-
num_images = len(gallery_images)
|
209 |
-
if num_images == 1:
|
210 |
-
frame_indices = [num_frames // 2] # Place single image in the middle
|
211 |
-
elif num_images == 2:
|
212 |
-
frame_indices = [0, num_frames - 1] # Place at start and end
|
213 |
-
else:
|
214 |
-
# Distribute images evenly across the video
|
215 |
-
# Ensure we don't exceed available frames
|
216 |
-
max_images = min(num_images, num_frames)
|
217 |
-
step = max(1, num_frames // max_images)
|
218 |
-
frame_indices = [min(i * step, num_frames - 1) for i in range(max_images)]
|
219 |
-
gallery_images = gallery_images[:max_images] # Limit images to what we can use
|
220 |
|
221 |
frames, mask = prepare_video_and_mask_Random2V(
|
222 |
images=gallery_images,
|
223 |
-
frame_indices=frame_indices,
|
224 |
height=target_h,
|
225 |
width=target_w,
|
226 |
num_frames=num_frames
|
@@ -298,7 +284,7 @@ with gr.Blocks() as demo:
|
|
298 |
|
299 |
with gr.Column():
|
300 |
video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
|
301 |
-
with gr.Accordion("Mode Information", open=True):
|
302 |
gr.Markdown("""
|
303 |
**Processing Modes:**
|
304 |
- **Ref2V**: Uses uploaded images as style references for video generation. All frames are generated based on the reference images.
|
@@ -331,4 +317,4 @@ with gr.Blocks() as demo:
|
|
331 |
generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
|
332 |
|
333 |
if __name__ == "__main__":
|
334 |
-
demo.queue().launch()
|
|
|
201 |
reference_images = None
|
202 |
elif mode == "Ref2V":
|
203 |
frames, mask = prepare_video_and_mask_Ref2V(height=target_h, width=target_w, num_frames=num_frames)
|
204 |
+
reference_images = gallery_images
|
|
|
205 |
else: # mode == "Random2V"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
206 |
|
207 |
frames, mask = prepare_video_and_mask_Random2V(
|
208 |
images=gallery_images,
|
209 |
+
frame_indices=[0,20,40], # todo - generalize
|
210 |
height=target_h,
|
211 |
width=target_w,
|
212 |
num_frames=num_frames
|
|
|
284 |
|
285 |
with gr.Column():
|
286 |
video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
|
287 |
+
with gr.Accordion("Mode Information", open=False):
|
288 |
gr.Markdown("""
|
289 |
**Processing Modes:**
|
290 |
- **Ref2V**: Uses uploaded images as style references for video generation. All frames are generated based on the reference images.
|
|
|
317 |
generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
|
318 |
|
319 |
if __name__ == "__main__":
|
320 |
+
demo.queue().launch(mcp_server=True)
|