Update live_preview_helpers.py
live_preview_helpers.py  +4 -6
@@ -60,7 +60,7 @@ def flux_pipe_call_that_returns_an_iterable_of_images(
     joint_attention_kwargs: Optional[Dict[str, Any]] = None,
     max_sequence_length: int = 512,
     good_vae: Optional[Any] = None,
-    cache_scope: Optional[Any] = None,
+    cache_scope: Optional[Any] = None,
 ):
     height = height or self.default_sample_size * self.vae_scale_factor
     width = width or self.default_sample_size * self.vae_scale_factor
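The new `cache_scope` parameter is deliberately loose (`Optional[Any]`): the caller can pass any context manager that should wrap the denoising loop, or nothing at all. A minimal usage sketch, assuming the helper has been bound to a Flux pipeline as its call method; `pipe`, `my_cache_scope`, and `show_preview` are placeholder names, not part of this commit:

```python
import contextlib

@contextlib.contextmanager
def my_cache_scope():
    # Hypothetical scope: set up a cache before the denoising loop starts
    # and tear it down once the generator is exhausted or closed.
    print("cache on")
    try:
        yield
    finally:
        print("cache off")

# `pipe` stands in for a pipeline whose call has been replaced by
# flux_pipe_call_that_returns_an_iterable_of_images.
for image in pipe(prompt="a cat", cache_scope=my_cache_scope()):
    show_preview(image)  # placeholder: push each intermediate image to the UI
```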
@@ -132,9 +132,8 @@ def flux_pipe_call_that_returns_an_iterable_of_images(
     guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
 
     # 6. Denoising loop
-    # *** FIX 4: Define a generator and use the passed-in cache_scope with it ***
     def denoise_loop_generator():
-        nonlocal latents
+        nonlocal latents
         for i, t in enumerate(timesteps):
             if self.interrupt:
                 continue
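The `nonlocal latents` declaration is what makes the generator correct: the loop body rebinds `latents` (`latents = self.scheduler.step(...)`), and without `nonlocal`, Python would treat `latents` as a fresh local of `denoise_loop_generator`, raising `UnboundLocalError` on first use and never updating the enclosing variable that the final `good_vae` decode reads. A standalone sketch of the same pattern (all names illustrative):

```python
def outer():
    latents = 0  # stands in for the latent tensor in the enclosing scope

    def loop():
        nonlocal latents     # without this, `latents += 1` raises
        for _ in range(3):   # UnboundLocalError on the first iteration
            latents += 1
            yield latents

    yield from loop()
    assert latents == 3  # the enclosing variable saw every update

print(list(outer()))  # -> [1, 2, 3]
```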
@@ -161,11 +160,10 @@ def flux_pipe_call_that_returns_an_iterable_of_images(
             latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
             torch.cuda.empty_cache()
 
-    # Use the context manager if it was provided
     if cache_scope:
-        with cache_scope
+        with cache_scope:
             yield from denoise_loop_generator()
-    else:
+    else:
         yield from denoise_loop_generator()
 
     # Final image using good_vae
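The substantive fix in this hunk is the colon: the old `with cache_scope` was a `SyntaxError`, so the previous revision would not even import. The two identical `yield from` branches could also be collapsed with `contextlib.nullcontext()`, a no-op context manager; a sketch of that equivalent form, not the committed code:

```python
import contextlib

def run(loop, cache_scope=None):
    # nullcontext() is a no-op, so one code path covers both cases,
    # mirroring the truthiness test (`if cache_scope:`) used in the diff.
    with cache_scope if cache_scope else contextlib.nullcontext():
        yield from loop()

def fake_loop():
    yield from (1, 2, 3)

print(list(run(fake_loop)))                                   # no scope
print(list(run(fake_loop, contextlib.suppress(ValueError))))  # any ctx manager
```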