Update app.py

app.py CHANGED
@@ -206,40 +206,39 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
     #print("Models moved.")

     result_images_tensor = None
-
-    if
-    …
-    result_images_tensor = pipeline_instance(**single_pass_call_kwargs).images
+    if improve_texture_flag:
+        if not active_latent_upsampler:
+            raise gr.Error("Spatial upscaler model not loaded or improve_texture not selected, cannot use multi-scale.")
+
+        multi_scale_pipeline_obj = LTXMultiScalePipeline(pipeline_instance, active_latent_upsampler)
+
+        first_pass_args = PIPELINE_CONFIG_YAML.get("first_pass", {}).copy()
+        first_pass_args["guidance_scale"] = float(ui_guidance_scale)
+        if "timesteps" not in first_pass_args:
+            first_pass_args["num_inference_steps"] = int(ui_steps)
+
+        second_pass_args = PIPELINE_CONFIG_YAML.get("second_pass", {}).copy()
+        second_pass_args["guidance_scale"] = float(ui_guidance_scale)
+
+        multi_scale_call_kwargs = call_kwargs.copy()
+        multi_scale_call_kwargs.update({
+            "downscale_factor": PIPELINE_CONFIG_YAML["downscale_factor"],
+            "first_pass": first_pass_args,
+            "second_pass": second_pass_args,
+        })
+
+        print(f"Calling multi-scale pipeline (eff. HxW: {actual_height}x{actual_width}) on {target_inference_device}")
+        result_images_tensor = multi_scale_pipeline_obj(**multi_scale_call_kwargs).images
+    else:
+        single_pass_call_kwargs = call_kwargs.copy()
+        single_pass_call_kwargs["guidance_scale"] = float(ui_guidance_scale)
+        single_pass_call_kwargs["num_inference_steps"] = int(ui_steps)
+        single_pass_call_kwargs.pop("first_pass", None)
+        single_pass_call_kwargs.pop("second_pass", None)
+        single_pass_call_kwargs.pop("downscale_factor", None)
+
+        print(f"Calling base pipeline (padded HxW: {height_padded}x{width_padded}) on {target_inference_device}")
+        result_images_tensor = pipeline_instance(**single_pass_call_kwargs).images

     if result_images_tensor is None:
         raise gr.Error("Generation failed.")