diffusers-benchmarking-bot committed · verified
Commit 2266b5f · 1 Parent(s): 3b83aa0

Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. main/composable_stable_diffusion.py +1 -1
  2. main/imagic_stable_diffusion.py +1 -1
  3. main/img2img_inpainting.py +1 -1
  4. main/interpolate_stable_diffusion.py +1 -1
  5. main/lpw_stable_diffusion.py +2 -2
  6. main/lpw_stable_diffusion_onnx.py +2 -2
  7. main/lpw_stable_diffusion_xl.py +1 -1
  8. main/multilingual_stable_diffusion.py +1 -1
  9. main/pipeline_controlnet_xl_kolors.py +1 -1
  10. main/pipeline_controlnet_xl_kolors_img2img.py +1 -1
  11. main/pipeline_controlnet_xl_kolors_inpaint.py +1 -1
  12. main/pipeline_demofusion_sdxl.py +1 -1
  13. main/pipeline_faithdiff_stable_diffusion_xl.py +1 -1
  14. main/pipeline_flux_differential_img2img.py +2 -2
  15. main/pipeline_flux_kontext_multiple_images.py +1 -1
  16. main/pipeline_flux_rf_inversion.py +1 -1
  17. main/pipeline_flux_semantic_guidance.py +1 -1
  18. main/pipeline_flux_with_cfg.py +1 -1
  19. main/pipeline_kolors_differential_img2img.py +1 -1
  20. main/pipeline_kolors_inpainting.py +1 -1
  21. main/pipeline_prompt2prompt.py +1 -1
  22. main/pipeline_sdxl_style_aligned.py +1 -1
  23. main/pipeline_stable_diffusion_3_differential_img2img.py +1 -1
  24. main/pipeline_stable_diffusion_3_instruct_pix2pix.py +1 -1
  25. main/pipeline_stable_diffusion_xl_attentive_eraser.py +1 -1
  26. main/pipeline_stable_diffusion_xl_controlnet_adapter.py +1 -1
  27. main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +1 -1
  28. main/pipeline_stable_diffusion_xl_differential_img2img.py +1 -1
  29. main/pipeline_stable_diffusion_xl_ipex.py +1 -1
  30. main/pipeline_stg_cogvideox.py +1 -1
  31. main/pipeline_stg_ltx.py +1 -1
  32. main/pipeline_stg_ltx_image2video.py +1 -1
  33. main/pipeline_stg_mochi.py +1 -1
  34. main/pipeline_zero1to3.py +1 -1
  35. main/rerender_a_video.py +1 -1
  36. main/run_onnx_controlnet.py +1 -1
  37. main/run_tensorrt_controlnet.py +1 -1
  38. main/sd_text2img_k_diffusion.py +1 -1
  39. main/seed_resize_stable_diffusion.py +1 -1
  40. main/stable_diffusion_comparison.py +1 -1
  41. main/stable_diffusion_controlnet_img2img.py +1 -1
  42. main/stable_diffusion_controlnet_inpaint.py +1 -1
  43. main/stable_diffusion_controlnet_inpaint_img2img.py +1 -1
  44. main/stable_diffusion_controlnet_reference.py +1 -1
  45. main/stable_diffusion_ipex.py +1 -1
  46. main/stable_diffusion_reference.py +1 -1
  47. main/stable_diffusion_repaint.py +1 -1
  48. main/stable_diffusion_xl_reference.py +1 -1
  49. main/text_inpainting.py +1 -1
  50. main/tiled_upscaling.py +1 -1
main/composable_stable_diffusion.py CHANGED
@@ -398,7 +398,7 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
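The `latents` argument described in these docstrings is shared by the standard diffusers pipelines, so a short illustration may help. The sketch below is a hypothetical usage example, not part of this commit; the checkpoint id, prompts, and image size are assumptions. It pre-generates a latents tensor from a seeded `torch.Generator` and reuses it with two different prompts, which is the "tweak the same generation with different prompts" behaviour the docstring describes.

```python
import torch
from diffusers import StableDiffusionPipeline

# Hypothetical example: any Stable Diffusion checkpoint works the same way.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Pre-generate noisy latents from a Gaussian distribution with a fixed seed.
generator = torch.Generator(device="cuda").manual_seed(0)
latents = torch.randn(
    (1, pipe.unet.config.in_channels, 512 // 8, 512 // 8),  # (batch, channels, H/8, W/8)
    generator=generator,
    device="cuda",
    dtype=torch.float16,
)

# Reusing the same latents with different prompts tweaks the same generation.
image_a = pipe("a photo of a cat", latents=latents).images[0]
image_b = pipe("a photo of a dog", latents=latents).images[0]

# If `latents` is omitted, the pipeline samples a tensor itself using the supplied `generator`,
# which is what the corrected docstring sentence states.
image_c = pipe("a photo of a cat", generator=torch.Generator("cuda").manual_seed(0)).images[0]
```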
main/imagic_stable_diffusion.py CHANGED
@@ -147,7 +147,7 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
main/img2img_inpainting.py CHANGED
@@ -197,7 +197,7 @@ class ImageToImageInpaintingPipeline(DiffusionPipeline):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/interpolate_stable_diffusion.py CHANGED
@@ -173,7 +173,7 @@ class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/lpw_stable_diffusion.py CHANGED
@@ -888,7 +888,7 @@ class StableDiffusionLongPromptWeightingPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
@@ -1131,7 +1131,7 @@ class StableDiffusionLongPromptWeightingPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/lpw_stable_diffusion_onnx.py CHANGED
@@ -721,7 +721,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
    latents (`np.ndarray`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    max_embeddings_multiples (`int`, *optional*, defaults to `3`):
        The max multiple length of prompt embeddings compared to the max output length of text encoder.
    output_type (`str`, *optional*, defaults to `"pil"`):
@@ -918,7 +918,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
    latents (`np.ndarray`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    max_embeddings_multiples (`int`, *optional*, defaults to `3`):
        The max multiple length of prompt embeddings compared to the max output length of text encoder.
    output_type (`str`, *optional*, defaults to `"pil"`):
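The two lpw (long prompt weighting) files above also document `max_embeddings_multiples`, which controls how many 77-token chunks of prompt embeddings may be concatenated beyond the text encoder's limit. As a hedged illustration of how these community pipelines are typically loaded and called (the checkpoint id and prompt are placeholders, and the exact call signature should be checked against the file itself):

```python
import torch
from diffusers import DiffusionPipeline

# Hypothetical example: load the community pipeline defined in main/lpw_stable_diffusion.py.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",        # placeholder checkpoint
    custom_pipeline="lpw_stable_diffusion",  # community pipeline name
    torch_dtype=torch.float16,
).to("cuda")

# A prompt well beyond the 77-token limit, with parenthesis-based weighting.
long_prompt = "a (highly detailed:1.2) painting of a castle on a cliff at sunset, " * 10

# `max_embeddings_multiples=3` allows up to 3 chunks of prompt embeddings.
image = pipe(
    prompt=long_prompt,
    max_embeddings_multiples=3,
    num_inference_steps=30,
).images[0]
```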
main/lpw_stable_diffusion_xl.py CHANGED
@@ -1519,7 +1519,7 @@ class SDXLLongPromptWeightingPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    ip_adapter_image: (`PipelineImageInput`, *optional*):
        Optional image input to work with IP Adapters.
    prompt_embeds (`torch.Tensor`, *optional*):
main/multilingual_stable_diffusion.py CHANGED
@@ -187,7 +187,7 @@ class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/pipeline_controlnet_xl_kolors.py CHANGED
@@ -888,7 +888,7 @@ class KolorsControlNetPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_controlnet_xl_kolors_img2img.py CHANGED
@@ -1066,7 +1066,7 @@ class KolorsControlNetImg2ImgPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_controlnet_xl_kolors_inpaint.py CHANGED
@@ -1298,7 +1298,7 @@ class KolorsControlNetInpaintPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/pipeline_demofusion_sdxl.py CHANGED
@@ -724,7 +724,7 @@ class DemoFusionSDXLPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_faithdiff_stable_diffusion_xl.py CHANGED
@@ -1906,7 +1906,7 @@ class FaithDiffStableDiffusionXLPipeline(
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_flux_differential_img2img.py CHANGED
@@ -730,7 +730,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
        1)`, or `(H, W)`.
    mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`):
        `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask
-       latents tensor will ge generated by `mask_image`.
+       latents tensor will be generated by `mask_image`.
    height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
        The height in pixels of the generated image. This is set to 1024 by default for the best results.
    width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
@@ -769,7 +769,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_flux_kontext_multiple_images.py CHANGED
@@ -885,7 +885,7 @@ class FluxKontextPipeline(
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_flux_rf_inversion.py CHANGED
@@ -711,7 +711,7 @@ class RFInversionFluxPipeline(
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_flux_semantic_guidance.py CHANGED
@@ -853,7 +853,7 @@ class FluxSemanticGuidancePipeline(
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_flux_with_cfg.py CHANGED
@@ -639,7 +639,7 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_kolors_differential_img2img.py CHANGED
@@ -904,7 +904,7 @@ class KolorsDifferentialImg2ImgPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_kolors_inpainting.py CHANGED
@@ -1246,7 +1246,7 @@ class KolorsInpaintPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/pipeline_prompt2prompt.py CHANGED
@@ -611,7 +611,7 @@ class Prompt2PromptPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
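The recurring `output_type` docstring corresponds to a small API switch between PIL and NumPy outputs. A minimal sketch, assuming a standard Stable Diffusion checkpoint (the model id and prompt are placeholders):

```python
import numpy as np
import torch
from diffusers import StableDiffusionPipeline

# Hypothetical example: the checkpoint id is a placeholder.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Default: output_type="pil" -> a list of PIL.Image.Image objects.
pil_images = pipe("a watercolor landscape", output_type="pil").images

# output_type="np" -> a NumPy array batch of shape (batch, height, width, 3), values in [0, 1].
np_images = pipe("a watercolor landscape", output_type="np").images
assert isinstance(np_images, np.ndarray)
```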
main/pipeline_sdxl_style_aligned.py CHANGED
@@ -1480,7 +1480,7 @@ class StyleAlignedSDXLPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stable_diffusion_3_differential_img2img.py CHANGED
@@ -748,7 +748,7 @@ class StableDiffusion3DifferentialImg2ImgPipeline(DiffusionPipeline):
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stable_diffusion_3_instruct_pix2pix.py CHANGED
@@ -945,7 +945,7 @@ class StableDiffusion3InstructPix2PixPipeline(
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stable_diffusion_xl_attentive_eraser.py CHANGED
@@ -1786,7 +1786,7 @@ class StableDiffusionXL_AE_Pipeline(
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/pipeline_stable_diffusion_xl_controlnet_adapter.py CHANGED
@@ -973,7 +973,7 @@ class StableDiffusionXLControlNetAdapterPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py CHANGED
@@ -1329,7 +1329,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stable_diffusion_xl_differential_img2img.py CHANGED
@@ -1053,7 +1053,7 @@ class StableDiffusionXLDifferentialImg2ImgPipeline(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stable_diffusion_xl_ipex.py CHANGED
@@ -832,7 +832,7 @@ class StableDiffusionXLPipelineIpex(
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stg_cogvideox.py CHANGED
@@ -632,7 +632,7 @@ class CogVideoXSTGPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stg_ltx.py CHANGED
@@ -620,7 +620,7 @@ class LTXSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderM
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stg_ltx_image2video.py CHANGED
@@ -682,7 +682,7 @@ class LTXImageToVideoSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVide
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_stg_mochi.py CHANGED
@@ -603,7 +603,7 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/pipeline_zero1to3.py CHANGED
@@ -657,7 +657,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/rerender_a_video.py CHANGED
@@ -656,7 +656,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/run_onnx_controlnet.py CHANGED
@@ -591,7 +591,7 @@ class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/run_tensorrt_controlnet.py CHANGED
@@ -695,7 +695,7 @@ class TensorRTStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
main/sd_text2img_k_diffusion.py CHANGED
@@ -326,7 +326,7 @@ class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/seed_resize_stable_diffusion.py CHANGED
@@ -122,7 +122,7 @@ class SeedResizeStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/stable_diffusion_comparison.py CHANGED
@@ -279,7 +279,7 @@ class StableDiffusionComparisonPipeline(DiffusionPipeline, StableDiffusionMixin)
    latents (`torch.Tensor`, optional):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-       tensor will ge generated by sampling using the supplied random `generator`.
+       tensor will be generated by sampling using the supplied random `generator`.
    output_type (`str`, optional, defaults to `"pil"`):
        The output format of the generate image. Choose between
        [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/stable_diffusion_controlnet_img2img.py CHANGED
@@ -670,7 +670,7 @@ class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline, StableDiffusio
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
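The `prompt_embeds` argument shown in these context lines can likewise be precomputed and passed in place of a string prompt. A hedged sketch using the pipeline's own tokenizer and text encoder (the checkpoint id is again an assumption):

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Encode the prompt once; the resulting embeddings can be reweighted, cached, or reused.
text_inputs = pipe.tokenizer(
    "an astronaut riding a horse",
    padding="max_length",
    max_length=pipe.tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    prompt_embeds = pipe.text_encoder(text_inputs.input_ids.to("cuda"))[0]

# When `prompt_embeds` is supplied, `prompt` is left as None and the embeddings are used directly.
image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=30).images[0]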
main/stable_diffusion_controlnet_inpaint.py CHANGED
@@ -810,7 +810,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline, StableDiffusio
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
main/stable_diffusion_controlnet_inpaint_img2img.py CHANGED
@@ -804,7 +804,7 @@ class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline, StableD
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
main/stable_diffusion_controlnet_reference.py CHANGED
@@ -179,7 +179,7 @@ class StableDiffusionControlNetReferencePipeline(StableDiffusionControlNetPipeli
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
main/stable_diffusion_ipex.py CHANGED
@@ -615,7 +615,7 @@ class StableDiffusionIPEXPipeline(
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
main/stable_diffusion_reference.py CHANGED
@@ -885,7 +885,7 @@ class StableDiffusionReferencePipeline(
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
main/stable_diffusion_repaint.py CHANGED
@@ -678,7 +678,7 @@ class StableDiffusionRepaintPipeline(
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
main/stable_diffusion_xl_reference.py CHANGED
@@ -380,7 +380,7 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  prompt_embeds (`torch.Tensor`, *optional*):
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
  provided, text embeddings will be generated from `prompt` input argument.
main/text_inpainting.py CHANGED
@@ -180,7 +180,7 @@ class TextInpainting(DiffusionPipeline, StableDiffusionMixin):
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  output_type (`str`, *optional*, defaults to `"pil"`):
  The output format of the generate image. Choose between
  [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
main/tiled_upscaling.py CHANGED
@@ -231,7 +231,7 @@ class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
  latents (`torch.Tensor`, *optional*):
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`.
  tile_size (`int`, *optional*):
  The size of the tiles. Too big can result in an OOM-error.
  tile_border (`int`, *optional*):
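For the tiled upscaler, `tile_size` and `tile_border` are call arguments documented in the hunk above. A hedged sketch, assuming the community pipeline is loaded by its file name on top of the standard x4 upscaler weights; the loading path and return handling are assumptions, not verified against this file:

import torch
from diffusers import DiffusionPipeline
from PIL import Image

# Assumption: the community pipeline is addressed by its file name, "tiled_upscaling".
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler",
    custom_pipeline="tiled_upscaling",
    torch_dtype=torch.float16,
).to("cuda")

low_res = Image.open("input.png").convert("RGB")

# Smaller tiles lower peak memory (see the OOM note above); tile_border controls the overlap
# blended between neighbouring tiles. The return value is assumed to follow the upscale
# pipeline's usual output object.
output = pipe(prompt="a sharp, highly detailed photo", image=low_res, tile_size=128, tile_border=32)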