Update app.py
app.py CHANGED
@@ -48,6 +48,7 @@ def generate(prompt,
              frames_to_use,
              seed,
              randomize_seed,
+             guidance_scale,
              improve_texture=False, progress=gr.Progress(track_tqdm=True)):
 
     if randomize_seed:
@@ -63,16 +64,15 @@ def generate(prompt,
     if mode == "text-to-video" and (video is not None):
         video = load_video(video)[:frames_to_use]
         condition = True
-
-
-        print("WTFFFFFF")
+    elif mode == "image-to-video" and (image is not None):
+        print("WTFFFFFF 1")
         video = [image]
         condition = True
     else:
         condition=False
 
     if condition:
-        print("WTFFFFFF")
+        print("WTFFFFFF 2")
         condition1 = LTXVideoCondition(video=video, frame_index=0)
     else:
         condition1 = None
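Note: as far as the removed lines show, there was previously no image-to-video guard at all, so `video = [image]` ran inside the text-to-video branch and clobbered the freshly loaded video, while an actual image input fell through to `condition=False`. The new `elif mode == "image-to-video" and (image is not None):` restores the intended branching.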
@@ -87,7 +87,7 @@ def generate(prompt,
         num_inference_steps=steps,
         decode_timestep = 0.05,
         decode_noise_scale = 0.025,
-        guidance_scale=
+        guidance_scale=guidance_scale,
         generator=torch.Generator(device="cuda").manual_seed(seed),
         output_type="latent",
     ).frames
@@ -110,7 +110,7 @@ def generate(prompt,
         width=upscaled_width,
         height=upscaled_height,
         num_frames=num_frames,
-        guidance_scale=
+        guidance_scale=guidance_scale,
         denoise_strength=0.6,  # Effectively, 0.6 * 3 inference steps
         num_inference_steps=3,
         latents=upscaled_latents,
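Note: both pipeline calls previously ended at a bare `guidance_scale=` with no value, a Python syntax error that would stop app.py from importing at all; this is the most plausible cause of the Space's runtime-error status. The fix binds the keyword to the new `guidance_scale` argument in both the base pass and the upscaling pass.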
@@ -178,6 +178,7 @@ with gr.Blocks(css=css, theme=gr.themes.Ocean()) as demo:
                 seed = gr.Number(label="seed", value=0, precision=0)
                 randomize_seed = gr.Checkbox(label="randomize seed")
             with gr.Row():
+                guidance_scale = gr.Slider(label="guidance scale", minimum=0, maximum=10, value=3, step=1)
                 steps = gr.Slider(label="Steps", minimum=1, maximum=30, value=8, step=1)
                 num_frames = gr.Slider(label="# frames", minimum=1, maximum=161, value=96, step=1)
             with gr.Row():
@@ -201,7 +202,7 @@ with gr.Blocks(css=css, theme=gr.themes.Ocean()) as demo:
                     num_frames,
                     frames_to_use,
                     seed,
-                    randomize_seed, improve_texture],
+                    randomize_seed, guidance_scale, improve_texture],
                 outputs=[output])
 
 
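For reference, the wiring this commit completes is the standard Gradio pattern: components listed in `inputs=` are passed to the callback positionally, so the new slider must occupy the same position in `generate`'s signature as it does in the inputs list (here, between `randomize_seed` and `improve_texture`). A minimal, self-contained sketch of that pattern, using illustrative names rather than the full app.py signature:

import gradio as gr

# Hypothetical two-input demo; only the slider-to-parameter wiring
# mirrors the commit above.
def generate(prompt, guidance_scale):
    # Values arrive positionally: the order of `inputs=` below must
    # match this signature.
    return f"prompt={prompt!r}, guidance_scale={guidance_scale}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    guidance_scale = gr.Slider(label="guidance scale", minimum=0, maximum=10, value=3, step=1)
    output = gr.Textbox(label="output")
    run = gr.Button("Run")
    run.click(fn=generate, inputs=[prompt, guidance_scale], outputs=[output])

demo.launch()

If the list and the signature drift out of sync, Gradio still calls the function, but values land on the wrong parameters, which tends to fail later and less legibly than the syntax error fixed here.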