Upload combined_stable_diffusion.py with huggingface_hub
combined_stable_diffusion.py (CHANGED)
@@ -440,7 +440,7 @@ class CombinedStableDiffusionXL(
 
             latents = latents / self.vae.config.scaling_factor
 
-            image = self.vae.decode(latents
+            image = self.vae.decode(latents).sample
 
             # cast back to fp16 if needed
             if needs_upcasting:
@@ -449,7 +449,11 @@ class CombinedStableDiffusionXL(
             image = latents
 
         if not output_type == "latent":
-            image = self.image_processor.postprocess(
+            image = self.image_processor.postprocess(
+                image,
+                output_type=output_type,
+                do_denormalize=[True] * image.shape[0]
+            )
 
         # Offload all models
         self.maybe_free_model_hooks()
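For context, the two fixed lines are the standard tail of a diffusers-style pipeline: decode the rescaled latents with the VAE, then let the image processor denormalize and convert the result. Below is a minimal sketch of that flow, assuming diffusers' AutoencoderKL and VaeImageProcessor APIs; the decode_latents helper and its argument names are illustrative and not part of this commit.

# Minimal sketch: decode latents and postprocess them to the requested output type.
# Assumes diffusers' AutoencoderKL / VaeImageProcessor; decode_latents is hypothetical.
import torch
from diffusers import AutoencoderKL
from diffusers.image_processor import VaeImageProcessor

def decode_latents(vae: AutoencoderKL, image_processor: VaeImageProcessor,
                   latents: torch.Tensor, output_type: str = "pil"):
    if output_type == "latent":
        return latents
    # Undo the scaling applied when the latents were encoded.
    latents = latents / vae.config.scaling_factor
    # AutoencoderKL.decode returns a DecoderOutput; the image tensor lives in .sample.
    image = vae.decode(latents).sample
    # Denormalize each image in the batch from [-1, 1] to [0, 1] and convert
    # to the requested output type (e.g. PIL images or a numpy array).
    return image_processor.postprocess(
        image,
        output_type=output_type,
        do_denormalize=[True] * image.shape[0],
    )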