Upload folder using huggingface_hub
Browse files
- main/adaptive_mask_inpainting.py +1 -1
- main/composable_stable_diffusion.py +1 -1
- main/edict_pipeline.py +1 -1
- main/fresco_v2v.py +1 -1
- main/gluegen.py +1 -1
- main/instaflow_one_step.py +1 -1
- main/ip_adapter_face_id.py +1 -1
- main/kohya_hires_fix.py +1 -1
- main/latent_consistency_img2img.py +1 -1
- main/latent_consistency_interpolate.py +1 -1
- main/latent_consistency_txt2img.py +1 -1
- main/llm_grounded_diffusion.py +1 -1
- main/lpw_stable_diffusion.py +1 -1
- main/lpw_stable_diffusion_xl.py +1 -1
- main/pipeline_animatediff_controlnet.py +1 -1
- main/pipeline_animatediff_img2video.py +1 -1
- main/pipeline_animatediff_ipex.py +1 -1
- main/pipeline_demofusion_sdxl.py +1 -1
- main/pipeline_fabric.py +1 -1
- main/pipeline_flux_differential_img2img.py +3 -4
- main/pipeline_flux_rf_inversion.py +1 -3
- main/pipeline_flux_with_cfg.py +1 -3
- main/pipeline_hunyuandit_differential_img2img.py +1 -3
- main/pipeline_kolors_differential_img2img.py +1 -3
- main/pipeline_prompt2prompt.py +1 -1
- main/pipeline_sdxl_style_aligned.py +1 -1
- main/pipeline_stable_diffusion_3_differential_img2img.py +1 -1
- main/pipeline_stable_diffusion_boxdiff.py +1 -1
- main/pipeline_stable_diffusion_pag.py +1 -1
- main/pipeline_stable_diffusion_upscale_ldm3d.py +1 -1
- main/pipeline_stable_diffusion_xl_controlnet_adapter.py +1 -1
- main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +1 -1
- main/pipeline_stable_diffusion_xl_differential_img2img.py +1 -1
- main/pipeline_stable_diffusion_xl_ipex.py +1 -1
- main/pipeline_zero1to3.py +1 -1
- main/rerender_a_video.py +1 -1
- main/stable_diffusion_controlnet_img2img.py +1 -1
- main/stable_diffusion_controlnet_inpaint.py +1 -1
- main/stable_diffusion_controlnet_inpaint_img2img.py +1 -1
- main/stable_diffusion_ipex.py +1 -1
- main/stable_diffusion_reference.py +1 -1
- main/stable_diffusion_repaint.py +1 -1
- main/stable_diffusion_tensorrt_img2img.py +1 -1
- main/stable_diffusion_tensorrt_inpaint.py +1 -1
- main/stable_diffusion_tensorrt_txt2img.py +1 -1
main/adaptive_mask_inpainting.py
CHANGED
|
@@ -450,7 +450,7 @@ class AdaptiveMaskInpaintPipeline(
|
|
| 450 |
safety_checker=safety_checker,
|
| 451 |
feature_extractor=feature_extractor,
|
| 452 |
)
|
| 453 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 454 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 455 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 456 |
|
|
|
|
| 450 |
safety_checker=safety_checker,
|
| 451 |
feature_extractor=feature_extractor,
|
| 452 |
)
|
| 453 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 454 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 455 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 456 |
|
main/composable_stable_diffusion.py
CHANGED
|
@@ -162,7 +162,7 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
|
|
| 162 |
safety_checker=safety_checker,
|
| 163 |
feature_extractor=feature_extractor,
|
| 164 |
)
|
| 165 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 166 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 167 |
|
| 168 |
def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
|
|
|
|
| 162 |
safety_checker=safety_checker,
|
| 163 |
feature_extractor=feature_extractor,
|
| 164 |
)
|
| 165 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 166 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 167 |
|
| 168 |
def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
|
main/edict_pipeline.py
CHANGED
|
@@ -35,7 +35,7 @@ class EDICTPipeline(DiffusionPipeline):
|
|
| 35 |
scheduler=scheduler,
|
| 36 |
)
|
| 37 |
|
| 38 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 39 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 40 |
|
| 41 |
def _encode_prompt(
|
|
|
|
| 35 |
scheduler=scheduler,
|
| 36 |
)
|
| 37 |
|
| 38 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 39 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 40 |
|
| 41 |
def _encode_prompt(
|
main/fresco_v2v.py
CHANGED
|
@@ -1342,7 +1342,7 @@ class FrescoV2VPipeline(StableDiffusionControlNetImg2ImgPipeline):
|
|
| 1342 |
feature_extractor=feature_extractor,
|
| 1343 |
image_encoder=image_encoder,
|
| 1344 |
)
|
| 1345 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 1346 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 1347 |
self.control_image_processor = VaeImageProcessor(
|
| 1348 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
|
|
|
| 1342 |
feature_extractor=feature_extractor,
|
| 1343 |
image_encoder=image_encoder,
|
| 1344 |
)
|
| 1345 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 1346 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 1347 |
self.control_image_processor = VaeImageProcessor(
|
| 1348 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
main/gluegen.py
CHANGED
|
@@ -221,7 +221,7 @@ class GlueGenStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, St
|
|
| 221 |
language_adapter=language_adapter,
|
| 222 |
tensor_norm=tensor_norm,
|
| 223 |
)
|
| 224 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 225 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 226 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 227 |
|
|
|
|
| 221 |
language_adapter=language_adapter,
|
| 222 |
tensor_norm=tensor_norm,
|
| 223 |
)
|
| 224 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 225 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 226 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 227 |
|
main/instaflow_one_step.py
CHANGED
|
@@ -182,7 +182,7 @@ class InstaFlowPipeline(
|
|
| 182 |
safety_checker=safety_checker,
|
| 183 |
feature_extractor=feature_extractor,
|
| 184 |
)
|
| 185 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 186 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 187 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 188 |
|
|
|
|
| 182 |
safety_checker=safety_checker,
|
| 183 |
feature_extractor=feature_extractor,
|
| 184 |
)
|
| 185 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 186 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 187 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 188 |
|
main/ip_adapter_face_id.py
CHANGED
|
@@ -265,7 +265,7 @@ class IPAdapterFaceIDStableDiffusionPipeline(
|
|
| 265 |
feature_extractor=feature_extractor,
|
| 266 |
image_encoder=image_encoder,
|
| 267 |
)
|
| 268 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 269 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 270 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 271 |
|
|
|
|
| 265 |
feature_extractor=feature_extractor,
|
| 266 |
image_encoder=image_encoder,
|
| 267 |
)
|
| 268 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 269 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 270 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 271 |
|
main/kohya_hires_fix.py
CHANGED
|
@@ -463,6 +463,6 @@ class StableDiffusionHighResFixPipeline(StableDiffusionPipeline):
|
|
| 463 |
feature_extractor=feature_extractor,
|
| 464 |
image_encoder=image_encoder,
|
| 465 |
)
|
| 466 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 467 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 468 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
|
|
|
| 463 |
feature_extractor=feature_extractor,
|
| 464 |
image_encoder=image_encoder,
|
| 465 |
)
|
| 466 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 467 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 468 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
main/latent_consistency_img2img.py
CHANGED
|
@@ -69,7 +69,7 @@ class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):
|
|
| 69 |
safety_checker=safety_checker,
|
| 70 |
feature_extractor=feature_extractor,
|
| 71 |
)
|
| 72 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 73 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 74 |
|
| 75 |
def _encode_prompt(
|
|
|
|
| 69 |
safety_checker=safety_checker,
|
| 70 |
feature_extractor=feature_extractor,
|
| 71 |
)
|
| 72 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 73 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 74 |
|
| 75 |
def _encode_prompt(
|
main/latent_consistency_interpolate.py
CHANGED
|
@@ -273,7 +273,7 @@ class LatentConsistencyModelWalkPipeline(
|
|
| 273 |
safety_checker=safety_checker,
|
| 274 |
feature_extractor=feature_extractor,
|
| 275 |
)
|
| 276 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 277 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 278 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 279 |
|
|
|
|
| 273 |
safety_checker=safety_checker,
|
| 274 |
feature_extractor=feature_extractor,
|
| 275 |
)
|
| 276 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 277 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 278 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 279 |
|
main/latent_consistency_txt2img.py
CHANGED
|
@@ -67,7 +67,7 @@ class LatentConsistencyModelPipeline(DiffusionPipeline):
|
|
| 67 |
safety_checker=safety_checker,
|
| 68 |
feature_extractor=feature_extractor,
|
| 69 |
)
|
| 70 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 71 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 72 |
|
| 73 |
def _encode_prompt(
|
|
|
|
| 67 |
safety_checker=safety_checker,
|
| 68 |
feature_extractor=feature_extractor,
|
| 69 |
)
|
| 70 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 71 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 72 |
|
| 73 |
def _encode_prompt(
|
main/llm_grounded_diffusion.py
CHANGED
|
@@ -410,7 +410,7 @@ class LLMGroundedDiffusionPipeline(
|
|
| 410 |
feature_extractor=feature_extractor,
|
| 411 |
image_encoder=image_encoder,
|
| 412 |
)
|
| 413 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 414 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 415 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 416 |
|
|
|
|
| 410 |
feature_extractor=feature_extractor,
|
| 411 |
image_encoder=image_encoder,
|
| 412 |
)
|
| 413 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 414 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 415 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 416 |
|
main/lpw_stable_diffusion.py
CHANGED
|
@@ -568,7 +568,7 @@ class StableDiffusionLongPromptWeightingPipeline(
|
|
| 568 |
safety_checker=safety_checker,
|
| 569 |
feature_extractor=feature_extractor,
|
| 570 |
)
|
| 571 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 572 |
|
| 573 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 574 |
self.register_to_config(
|
|
|
|
| 568 |
safety_checker=safety_checker,
|
| 569 |
feature_extractor=feature_extractor,
|
| 570 |
)
|
| 571 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 572 |
|
| 573 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 574 |
self.register_to_config(
|
main/lpw_stable_diffusion_xl.py
CHANGED
|
@@ -673,7 +673,7 @@ class SDXLLongPromptWeightingPipeline(
|
|
| 673 |
image_encoder=image_encoder,
|
| 674 |
)
|
| 675 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 676 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 677 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 678 |
self.mask_processor = VaeImageProcessor(
|
| 679 |
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
|
|
|
|
| 673 |
image_encoder=image_encoder,
|
| 674 |
)
|
| 675 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 676 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 677 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 678 |
self.mask_processor = VaeImageProcessor(
|
| 679 |
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
|
main/pipeline_animatediff_controlnet.py
CHANGED
|
@@ -188,7 +188,7 @@ class AnimateDiffControlNetPipeline(
|
|
| 188 |
feature_extractor=feature_extractor,
|
| 189 |
image_encoder=image_encoder,
|
| 190 |
)
|
| 191 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 192 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 193 |
self.control_image_processor = VaeImageProcessor(
|
| 194 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
|
|
|
| 188 |
feature_extractor=feature_extractor,
|
| 189 |
image_encoder=image_encoder,
|
| 190 |
)
|
| 191 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 192 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 193 |
self.control_image_processor = VaeImageProcessor(
|
| 194 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
main/pipeline_animatediff_img2video.py
CHANGED
|
@@ -308,7 +308,7 @@ class AnimateDiffImgToVideoPipeline(
|
|
| 308 |
feature_extractor=feature_extractor,
|
| 309 |
image_encoder=image_encoder,
|
| 310 |
)
|
| 311 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 312 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 313 |
|
| 314 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
|
|
|
| 308 |
feature_extractor=feature_extractor,
|
| 309 |
image_encoder=image_encoder,
|
| 310 |
)
|
| 311 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 312 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 313 |
|
| 314 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
main/pipeline_animatediff_ipex.py
CHANGED
|
@@ -162,7 +162,7 @@ class AnimateDiffPipelineIpex(
|
|
| 162 |
feature_extractor=feature_extractor,
|
| 163 |
image_encoder=image_encoder,
|
| 164 |
)
|
| 165 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 166 |
self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor)
|
| 167 |
|
| 168 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
|
|
|
| 162 |
feature_extractor=feature_extractor,
|
| 163 |
image_encoder=image_encoder,
|
| 164 |
)
|
| 165 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 166 |
self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor)
|
| 167 |
|
| 168 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
main/pipeline_demofusion_sdxl.py
CHANGED
|
@@ -166,7 +166,7 @@ class DemoFusionSDXLPipeline(
|
|
| 166 |
scheduler=scheduler,
|
| 167 |
)
|
| 168 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 169 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 170 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 171 |
self.default_sample_size = self.unet.config.sample_size
|
| 172 |
|
|
|
|
| 166 |
scheduler=scheduler,
|
| 167 |
)
|
| 168 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 169 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 170 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 171 |
self.default_sample_size = self.unet.config.sample_size
|
| 172 |
|
main/pipeline_fabric.py
CHANGED
|
@@ -179,7 +179,7 @@ class FabricPipeline(DiffusionPipeline):
|
|
| 179 |
tokenizer=tokenizer,
|
| 180 |
scheduler=scheduler,
|
| 181 |
)
|
| 182 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 183 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 184 |
|
| 185 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
|
|
|
| 179 |
tokenizer=tokenizer,
|
| 180 |
scheduler=scheduler,
|
| 181 |
)
|
| 182 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 183 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 184 |
|
| 185 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
main/pipeline_flux_differential_img2img.py
CHANGED
|
@@ -221,13 +221,12 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
|
|
| 221 |
transformer=transformer,
|
| 222 |
scheduler=scheduler,
|
| 223 |
)
|
| 224 |
-
self.vae_scale_factor = (
|
| 225 |
-
2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16
|
| 226 |
-
)
|
| 227 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
|
|
|
| 228 |
self.mask_processor = VaeImageProcessor(
|
| 229 |
vae_scale_factor=self.vae_scale_factor,
|
| 230 |
-
vae_latent_channels=self.vae.config.latent_channels,
|
| 231 |
do_normalize=False,
|
| 232 |
do_binarize=False,
|
| 233 |
do_convert_grayscale=True,
|
|
|
|
| 221 |
transformer=transformer,
|
| 222 |
scheduler=scheduler,
|
| 223 |
)
|
| 224 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels)) if getattr(self, "vae", None) else 16
|
|
|
|
|
|
|
| 225 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 226 |
+
latent_channels = self.vae.config.latent_channels if getattr(self, "vae", None) else 16
|
| 227 |
self.mask_processor = VaeImageProcessor(
|
| 228 |
vae_scale_factor=self.vae_scale_factor,
|
| 229 |
+
vae_latent_channels=latent_channels,
|
| 230 |
do_normalize=False,
|
| 231 |
do_binarize=False,
|
| 232 |
do_convert_grayscale=True,
|
main/pipeline_flux_rf_inversion.py
CHANGED
|
@@ -219,9 +219,7 @@ class RFInversionFluxPipeline(
|
|
| 219 |
transformer=transformer,
|
| 220 |
scheduler=scheduler,
|
| 221 |
)
|
| 222 |
-
self.vae_scale_factor = (
|
| 223 |
-
2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
|
| 224 |
-
)
|
| 225 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 226 |
self.tokenizer_max_length = (
|
| 227 |
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
|
|
|
|
| 219 |
transformer=transformer,
|
| 220 |
scheduler=scheduler,
|
| 221 |
)
|
| 222 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
|
|
|
|
|
|
| 223 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 224 |
self.tokenizer_max_length = (
|
| 225 |
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
|
main/pipeline_flux_with_cfg.py
CHANGED
|
@@ -189,9 +189,7 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
|
|
| 189 |
transformer=transformer,
|
| 190 |
scheduler=scheduler,
|
| 191 |
)
|
| 192 |
-
self.vae_scale_factor = (
|
| 193 |
-
2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16
|
| 194 |
-
)
|
| 195 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 196 |
self.tokenizer_max_length = (
|
| 197 |
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
|
|
|
|
| 189 |
transformer=transformer,
|
| 190 |
scheduler=scheduler,
|
| 191 |
)
|
| 192 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels)) if getattr(self, "vae", None) else 16
|
|
|
|
|
|
|
| 193 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 194 |
self.tokenizer_max_length = (
|
| 195 |
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
|
main/pipeline_hunyuandit_differential_img2img.py
CHANGED
|
@@ -327,9 +327,7 @@ class HunyuanDiTDifferentialImg2ImgPipeline(DiffusionPipeline):
|
|
| 327 |
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 328 |
)
|
| 329 |
|
| 330 |
-
self.vae_scale_factor = (
|
| 331 |
-
2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
|
| 332 |
-
)
|
| 333 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 334 |
self.mask_processor = VaeImageProcessor(
|
| 335 |
vae_scale_factor=self.vae_scale_factor,
|
|
|
|
| 327 |
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 328 |
)
|
| 329 |
|
| 330 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
|
|
|
|
|
|
| 331 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 332 |
self.mask_processor = VaeImageProcessor(
|
| 333 |
vae_scale_factor=self.vae_scale_factor,
|
main/pipeline_kolors_differential_img2img.py
CHANGED
|
@@ -209,9 +209,7 @@ class KolorsDifferentialImg2ImgPipeline(
|
|
| 209 |
feature_extractor=feature_extractor,
|
| 210 |
)
|
| 211 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 212 |
-
self.vae_scale_factor = (
|
| 213 |
-
2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
|
| 214 |
-
)
|
| 215 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 216 |
|
| 217 |
self.mask_processor = VaeImageProcessor(
|
|
|
|
| 209 |
feature_extractor=feature_extractor,
|
| 210 |
)
|
| 211 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 212 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
|
|
|
|
|
|
| 213 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 214 |
|
| 215 |
self.mask_processor = VaeImageProcessor(
|
main/pipeline_prompt2prompt.py
CHANGED
|
@@ -205,7 +205,7 @@ class Prompt2PromptPipeline(
|
|
| 205 |
feature_extractor=feature_extractor,
|
| 206 |
image_encoder=image_encoder,
|
| 207 |
)
|
| 208 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 209 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 210 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 211 |
|
|
|
|
| 205 |
feature_extractor=feature_extractor,
|
| 206 |
image_encoder=image_encoder,
|
| 207 |
)
|
| 208 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 209 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 210 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 211 |
|
main/pipeline_sdxl_style_aligned.py
CHANGED
|
@@ -488,7 +488,7 @@ class StyleAlignedSDXLPipeline(
|
|
| 488 |
feature_extractor=feature_extractor,
|
| 489 |
)
|
| 490 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 491 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 492 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 493 |
self.mask_processor = VaeImageProcessor(
|
| 494 |
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
|
|
|
|
| 488 |
feature_extractor=feature_extractor,
|
| 489 |
)
|
| 490 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 491 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 492 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 493 |
self.mask_processor = VaeImageProcessor(
|
| 494 |
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
|
main/pipeline_stable_diffusion_3_differential_img2img.py
CHANGED
|
@@ -207,7 +207,7 @@ class StableDiffusion3DifferentialImg2ImgPipeline(DiffusionPipeline):
|
|
| 207 |
transformer=transformer,
|
| 208 |
scheduler=scheduler,
|
| 209 |
)
|
| 210 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 211 |
self.image_processor = VaeImageProcessor(
|
| 212 |
vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels
|
| 213 |
)
|
|
|
|
| 207 |
transformer=transformer,
|
| 208 |
scheduler=scheduler,
|
| 209 |
)
|
| 210 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 211 |
self.image_processor = VaeImageProcessor(
|
| 212 |
vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels
|
| 213 |
)
|
main/pipeline_stable_diffusion_boxdiff.py
CHANGED
|
@@ -491,7 +491,7 @@ class StableDiffusionBoxDiffPipeline(
|
|
| 491 |
feature_extractor=feature_extractor,
|
| 492 |
image_encoder=image_encoder,
|
| 493 |
)
|
| 494 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 495 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 496 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 497 |
|
|
|
|
| 491 |
feature_extractor=feature_extractor,
|
| 492 |
image_encoder=image_encoder,
|
| 493 |
)
|
| 494 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 495 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 496 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 497 |
|
main/pipeline_stable_diffusion_pag.py
CHANGED
|
@@ -458,7 +458,7 @@ class StableDiffusionPAGPipeline(
|
|
| 458 |
feature_extractor=feature_extractor,
|
| 459 |
image_encoder=image_encoder,
|
| 460 |
)
|
| 461 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 462 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 463 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 464 |
|
|
|
|
| 458 |
feature_extractor=feature_extractor,
|
| 459 |
image_encoder=image_encoder,
|
| 460 |
)
|
| 461 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 462 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 463 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 464 |
|
main/pipeline_stable_diffusion_upscale_ldm3d.py
CHANGED
|
@@ -151,7 +151,7 @@ class StableDiffusionUpscaleLDM3DPipeline(
|
|
| 151 |
watermarker=watermarker,
|
| 152 |
feature_extractor=feature_extractor,
|
| 153 |
)
|
| 154 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 155 |
self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor, resample="bilinear")
|
| 156 |
# self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 157 |
self.register_to_config(max_noise_level=max_noise_level)
|
|
|
|
| 151 |
watermarker=watermarker,
|
| 152 |
feature_extractor=feature_extractor,
|
| 153 |
)
|
| 154 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 155 |
self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor, resample="bilinear")
|
| 156 |
# self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 157 |
self.register_to_config(max_noise_level=max_noise_level)
|
main/pipeline_stable_diffusion_xl_controlnet_adapter.py
CHANGED
|
@@ -226,7 +226,7 @@ class StableDiffusionXLControlNetAdapterPipeline(
|
|
| 226 |
scheduler=scheduler,
|
| 227 |
)
|
| 228 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 229 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 230 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 231 |
self.control_image_processor = VaeImageProcessor(
|
| 232 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
|
|
|
| 226 |
scheduler=scheduler,
|
| 227 |
)
|
| 228 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 229 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 230 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 231 |
self.control_image_processor = VaeImageProcessor(
|
| 232 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
CHANGED
|
@@ -374,7 +374,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
|
|
| 374 |
)
|
| 375 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 376 |
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 377 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 378 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 379 |
self.control_image_processor = VaeImageProcessor(
|
| 380 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
|
|
|
| 374 |
)
|
| 375 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 376 |
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 377 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 378 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 379 |
self.control_image_processor = VaeImageProcessor(
|
| 380 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
main/pipeline_stable_diffusion_xl_differential_img2img.py
CHANGED
|
@@ -258,7 +258,7 @@ class StableDiffusionXLDifferentialImg2ImgPipeline(
|
|
| 258 |
)
|
| 259 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 260 |
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 261 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 262 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 263 |
|
| 264 |
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
|
|
|
| 258 |
)
|
| 259 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 260 |
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 261 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 262 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 263 |
|
| 264 |
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
main/pipeline_stable_diffusion_xl_ipex.py
CHANGED
|
@@ -253,7 +253,7 @@ class StableDiffusionXLPipelineIpex(
|
|
| 253 |
feature_extractor=feature_extractor,
|
| 254 |
)
|
| 255 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 256 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 257 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 258 |
|
| 259 |
self.default_sample_size = self.unet.config.sample_size
|
|
|
|
| 253 |
feature_extractor=feature_extractor,
|
| 254 |
)
|
| 255 |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 256 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 257 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 258 |
|
| 259 |
self.default_sample_size = self.unet.config.sample_size
|
main/pipeline_zero1to3.py
CHANGED
|
@@ -181,7 +181,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
|
|
| 181 |
feature_extractor=feature_extractor,
|
| 182 |
cc_projection=cc_projection,
|
| 183 |
)
|
| 184 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 185 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 186 |
# self.model_mode = None
|
| 187 |
|
|
|
|
| 181 |
feature_extractor=feature_extractor,
|
| 182 |
cc_projection=cc_projection,
|
| 183 |
)
|
| 184 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 185 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 186 |
# self.model_mode = None
|
| 187 |
|
main/rerender_a_video.py
CHANGED
|
@@ -352,7 +352,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
|
|
| 352 |
safety_checker=safety_checker,
|
| 353 |
feature_extractor=feature_extractor,
|
| 354 |
)
|
| 355 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 356 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 357 |
self.control_image_processor = VaeImageProcessor(
|
| 358 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
|
|
|
| 352 |
safety_checker=safety_checker,
|
| 353 |
feature_extractor=feature_extractor,
|
| 354 |
)
|
| 355 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 356 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 357 |
self.control_image_processor = VaeImageProcessor(
|
| 358 |
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
main/stable_diffusion_controlnet_img2img.py
CHANGED
|
@@ -179,7 +179,7 @@ class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline, StableDiffusio
|
|
| 179 |
safety_checker=safety_checker,
|
| 180 |
feature_extractor=feature_extractor,
|
| 181 |
)
|
| 182 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 183 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 184 |
|
| 185 |
def _encode_prompt(
|
|
|
|
| 179 |
safety_checker=safety_checker,
|
| 180 |
feature_extractor=feature_extractor,
|
| 181 |
)
|
| 182 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 183 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 184 |
|
| 185 |
def _encode_prompt(
|
main/stable_diffusion_controlnet_inpaint.py
CHANGED
|
@@ -278,7 +278,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline, StableDiffusio
|
|
| 278 |
feature_extractor=feature_extractor,
|
| 279 |
)
|
| 280 |
|
| 281 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 282 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 283 |
|
| 284 |
def _encode_prompt(
|
|
|
|
| 278 |
feature_extractor=feature_extractor,
|
| 279 |
)
|
| 280 |
|
| 281 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 282 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 283 |
|
| 284 |
def _encode_prompt(
|
main/stable_diffusion_controlnet_inpaint_img2img.py
CHANGED
|
@@ -263,7 +263,7 @@ class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline, StableD
|
|
| 263 |
safety_checker=safety_checker,
|
| 264 |
feature_extractor=feature_extractor,
|
| 265 |
)
|
| 266 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 267 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 268 |
|
| 269 |
def _encode_prompt(
|
|
|
|
| 263 |
safety_checker=safety_checker,
|
| 264 |
feature_extractor=feature_extractor,
|
| 265 |
)
|
| 266 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 267 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 268 |
|
| 269 |
def _encode_prompt(
|
main/stable_diffusion_ipex.py
CHANGED
|
@@ -178,7 +178,7 @@ class StableDiffusionIPEXPipeline(
|
|
| 178 |
safety_checker=safety_checker,
|
| 179 |
feature_extractor=feature_extractor,
|
| 180 |
)
|
| 181 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 182 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 183 |
|
| 184 |
def get_input_example(self, prompt, height=None, width=None, guidance_scale=7.5, num_images_per_prompt=1):
|
|
|
|
| 178 |
safety_checker=safety_checker,
|
| 179 |
feature_extractor=feature_extractor,
|
| 180 |
)
|
| 181 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 182 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 183 |
|
| 184 |
def get_input_example(self, prompt, height=None, width=None, guidance_scale=7.5, num_images_per_prompt=1):
|
main/stable_diffusion_reference.py
CHANGED
|
@@ -219,7 +219,7 @@ class StableDiffusionReferencePipeline(
|
|
| 219 |
safety_checker=safety_checker,
|
| 220 |
feature_extractor=feature_extractor,
|
| 221 |
)
|
| 222 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 223 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 224 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 225 |
|
|
|
|
| 219 |
safety_checker=safety_checker,
|
| 220 |
feature_extractor=feature_extractor,
|
| 221 |
)
|
| 222 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 223 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 224 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 225 |
|
main/stable_diffusion_repaint.py
CHANGED
|
@@ -274,7 +274,7 @@ class StableDiffusionRepaintPipeline(
|
|
| 274 |
safety_checker=safety_checker,
|
| 275 |
feature_extractor=feature_extractor,
|
| 276 |
)
|
| 277 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 278 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 279 |
|
| 280 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
|
|
|
| 274 |
safety_checker=safety_checker,
|
| 275 |
feature_extractor=feature_extractor,
|
| 276 |
)
|
| 277 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 278 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 279 |
|
| 280 |
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
main/stable_diffusion_tensorrt_img2img.py
CHANGED
|
@@ -806,7 +806,7 @@ class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline):
|
|
| 806 |
self.engine = {} # loaded in build_engines()
|
| 807 |
|
| 808 |
self.vae.forward = self.vae.decode
|
| 809 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 810 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 811 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 812 |
|
|
|
|
| 806 |
self.engine = {} # loaded in build_engines()
|
| 807 |
|
| 808 |
self.vae.forward = self.vae.decode
|
| 809 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 810 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 811 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 812 |
|
main/stable_diffusion_tensorrt_inpaint.py
CHANGED
|
@@ -810,7 +810,7 @@ class TensorRTStableDiffusionInpaintPipeline(DiffusionPipeline):
|
|
| 810 |
self.engine = {} # loaded in build_engines()
|
| 811 |
|
| 812 |
self.vae.forward = self.vae.decode
|
| 813 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 814 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 815 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 816 |
|
|
|
|
| 810 |
self.engine = {} # loaded in build_engines()
|
| 811 |
|
| 812 |
self.vae.forward = self.vae.decode
|
| 813 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 814 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 815 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 816 |
|
main/stable_diffusion_tensorrt_txt2img.py
CHANGED
|
@@ -722,7 +722,7 @@ class TensorRTStableDiffusionPipeline(DiffusionPipeline):
|
|
| 722 |
self.engine = {} # loaded in build_engines()
|
| 723 |
|
| 724 |
self.vae.forward = self.vae.decode
|
| 725 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 726 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 727 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 728 |
|
|
|
|
| 722 |
self.engine = {} # loaded in build_engines()
|
| 723 |
|
| 724 |
self.vae.forward = self.vae.decode
|
| 725 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 726 |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 727 |
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 728 |
|