diff --git a/main/composable_stable_diffusion.py b/main/composable_stable_diffusion.py
index ec653bcdb4c63289534f55eb365697588a256336..a7c540ceb984d606de3971de275831941f834437 100644
--- a/main/composable_stable_diffusion.py
+++ b/main/composable_stable_diffusion.py
@@ -398,7 +398,7 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/imagic_stable_diffusion.py b/main/imagic_stable_diffusion.py
index a2561c91985871d8355302ec1b74d4b8a38c2d2a..091d0fbf8d3ae74e04e1333d7eed044d12e456da 100644
--- a/main/imagic_stable_diffusion.py
+++ b/main/imagic_stable_diffusion.py
@@ -147,7 +147,7 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
diff --git a/main/img2img_inpainting.py b/main/img2img_inpainting.py
index 7b9bd043d09956cd77134276e9fa5ea0f9f3a5d7..499230b1e2cd536ff9db0b14e13f7de173e8beda 100644
--- a/main/img2img_inpainting.py
+++ b/main/img2img_inpainting.py
@@ -197,7 +197,7 @@ class ImageToImageInpaintingPipeline(DiffusionPipeline):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/interpolate_stable_diffusion.py b/main/interpolate_stable_diffusion.py
index 460bb464f3b1ff35a3a142c04774929541053d5c..5b96c14d63670bcf46b4a9990baa84a4baadf62a 100644
--- a/main/interpolate_stable_diffusion.py
+++ b/main/interpolate_stable_diffusion.py
@@ -173,7 +173,7 @@ class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/lpw_stable_diffusion.py b/main/lpw_stable_diffusion.py
index ccb17a51e61534e6d700494284c38916b8907892..cb017c0bbe297cfc2a1934f996ff5b7362239951 100644
--- a/main/lpw_stable_diffusion.py
+++ b/main/lpw_stable_diffusion.py
@@ -888,7 +888,7 @@ class StableDiffusionLongPromptWeightingPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
@@ -1131,7 +1131,7 @@ class StableDiffusionLongPromptWeightingPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/lpw_stable_diffusion_onnx.py b/main/lpw_stable_diffusion_onnx.py
index ab1462b81b39cd8f3ae433f21a51c33e8beefce3..92effc1933298f97d7af99e9b551069071cfee91 100644
--- a/main/lpw_stable_diffusion_onnx.py
+++ b/main/lpw_stable_diffusion_onnx.py
@@ -721,7 +721,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
             latents (`np.ndarray`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                 The max multiple length of prompt embeddings compared to the max output length of text encoder.
             output_type (`str`, *optional*, defaults to `"pil"`):
@@ -918,7 +918,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
             latents (`np.ndarray`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                 The max multiple length of prompt embeddings compared to the max output length of text encoder.
             output_type (`str`, *optional*, defaults to `"pil"`):
diff --git a/main/lpw_stable_diffusion_xl.py b/main/lpw_stable_diffusion_xl.py
index ea67738ab74c6b193e06bcdee1cde8883087d6c3..272c5d5652c5ff1d1319e62c88649b4d442794bc 100644
--- a/main/lpw_stable_diffusion_xl.py
+++ b/main/lpw_stable_diffusion_xl.py
@@ -1519,7 +1519,7 @@ class SDXLLongPromptWeightingPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             ip_adapter_image: (`PipelineImageInput`, *optional*):
                 Optional image input to work with IP Adapters.
             prompt_embeds (`torch.Tensor`, *optional*):
diff --git a/main/multilingual_stable_diffusion.py b/main/multilingual_stable_diffusion.py
index 5e7453ed12019fee6d24104a76c3a827bf0da99b..afef4e9e9719faa676bd95ce1b05a8c382a15097 100644
--- a/main/multilingual_stable_diffusion.py
+++ b/main/multilingual_stable_diffusion.py
@@ -187,7 +187,7 @@ class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/pipeline_controlnet_xl_kolors.py b/main/pipeline_controlnet_xl_kolors.py
index af5586990e2ef307a98e0b2c726fb142047d6e9e..dc90aacdbc6bef85d8664c29cd9b3b9a3b41c8ae 100644
--- a/main/pipeline_controlnet_xl_kolors.py
+++ b/main/pipeline_controlnet_xl_kolors.py
@@ -888,7 +888,7 @@ class KolorsControlNetPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_controlnet_xl_kolors_img2img.py b/main/pipeline_controlnet_xl_kolors_img2img.py
index c0831945ed8e858684191196c856ea5bd40f52bc..189d0312143fee6ade8f88036e050c8d2104405d 100644
--- a/main/pipeline_controlnet_xl_kolors_img2img.py
+++ b/main/pipeline_controlnet_xl_kolors_img2img.py
@@ -1066,7 +1066,7 @@ class KolorsControlNetImg2ImgPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_controlnet_xl_kolors_inpaint.py b/main/pipeline_controlnet_xl_kolors_inpaint.py
index db15d99ac3eac9d6b880041d9ccd3f72aa47fa6f..4b6123cc1f8bb1fef6699211ab350d8794618af7 100644
--- a/main/pipeline_controlnet_xl_kolors_inpaint.py
+++ b/main/pipeline_controlnet_xl_kolors_inpaint.py
@@ -1298,7 +1298,7 @@ class KolorsControlNetInpaintPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/pipeline_demofusion_sdxl.py b/main/pipeline_demofusion_sdxl.py
index c9b57a6ece8c8f8717e544762a96b07483c2b172..119b39cefe682efa0697c95e495cffe77362ec05 100644
--- a/main/pipeline_demofusion_sdxl.py
+++ b/main/pipeline_demofusion_sdxl.py
@@ -724,7 +724,7 @@ class DemoFusionSDXLPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_faithdiff_stable_diffusion_xl.py b/main/pipeline_faithdiff_stable_diffusion_xl.py
index 43ef55d32c3de7ba3de0f9bd265c0b813eda5bc3..aa95d2ec719eaeede6ce0c340c92ac8562c88076 100644
--- a/main/pipeline_faithdiff_stable_diffusion_xl.py
+++ b/main/pipeline_faithdiff_stable_diffusion_xl.py
@@ -1906,7 +1906,7 @@ class FaithDiffStableDiffusionXLPipeline(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_flux_differential_img2img.py b/main/pipeline_flux_differential_img2img.py
index 7d6358cb3258de9749d8a7c2e650354edc116e2f..3677e73136f79a9539f66ad73774bb5b08669dec 100644
--- a/main/pipeline_flux_differential_img2img.py
+++ b/main/pipeline_flux_differential_img2img.py
@@ -730,7 +730,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
                 1)`, or `(H, W)`.
             mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`):
                 `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask
-                latents tensor will ge generated by `mask_image`.
+                latents tensor will be generated by `mask_image`.
             height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                 The height in pixels of the generated image. This is set to 1024 by default for the best results.
             width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
@@ -769,7 +769,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_flux_kontext_multiple_images.py b/main/pipeline_flux_kontext_multiple_images.py
index ef0c643a405e8a4adfb5b70da6393d3daa85cce0..7e4a9ed0fadcf0f5798d4b54d1d3743ce8321352 100644
--- a/main/pipeline_flux_kontext_multiple_images.py
+++ b/main/pipeline_flux_kontext_multiple_images.py
@@ -885,7 +885,7 @@ class FluxKontextPipeline(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_flux_rf_inversion.py b/main/pipeline_flux_rf_inversion.py
index 631d04b762d4f37dff64b87d45c5ec830db13a50..8f8b4817acf2ad82d7891048b0bdac9cdd8042ea 100644
--- a/main/pipeline_flux_rf_inversion.py
+++ b/main/pipeline_flux_rf_inversion.py
@@ -711,7 +711,7 @@ class RFInversionFluxPipeline(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                 If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_flux_semantic_guidance.py b/main/pipeline_flux_semantic_guidance.py
index 93bcd3af75e62ff981eee22b3fe96323ec63028d..b3d2b3a4b4e1f352438ac007e9aad890f095cebe 100644
--- a/main/pipeline_flux_semantic_guidance.py
+++ b/main/pipeline_flux_semantic_guidance.py
@@ -853,7 +853,7 @@ class FluxSemanticGuidancePipeline(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_flux_with_cfg.py b/main/pipeline_flux_with_cfg.py
index 1b8dc9ecb85e9feadd36764330373440b9257824..3916aff257f039cd71798d23ae41ab27e4506925 100644
--- a/main/pipeline_flux_with_cfg.py
+++ b/main/pipeline_flux_with_cfg.py
@@ -639,7 +639,7 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_kolors_differential_img2img.py b/main/pipeline_kolors_differential_img2img.py
index 9491447409e2f70c344e59832c4e6641e3492bcd..d299c839815ed86564561cc748e08d4fff84dd38 100644
--- a/main/pipeline_kolors_differential_img2img.py
+++ b/main/pipeline_kolors_differential_img2img.py
@@ -904,7 +904,7 @@ class KolorsDifferentialImg2ImgPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_kolors_inpainting.py b/main/pipeline_kolors_inpainting.py
index cce9f10ded3d0a12d2d8b489e042ff6a506da84c..3cab8ecac002c4764b9c5cf9a29a8a4d389e6260 100644
--- a/main/pipeline_kolors_inpainting.py
+++ b/main/pipeline_kolors_inpainting.py
@@ -1246,7 +1246,7 @@ class KolorsInpaintPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/pipeline_prompt2prompt.py b/main/pipeline_prompt2prompt.py
index 065edc0cfbe86a6e00eb09e4ed939ed62655e77d..8d94dc9248c17486590b66a64e38a68523880037 100644
--- a/main/pipeline_prompt2prompt.py
+++ b/main/pipeline_prompt2prompt.py
@@ -611,7 +611,7 @@ class Prompt2PromptPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/pipeline_sdxl_style_aligned.py b/main/pipeline_sdxl_style_aligned.py
index ea168036c196bfd1312b5d8de521114a4ecf6909..10438af365f9ba3e453d4e10ec9e877bced0a916 100644
--- a/main/pipeline_sdxl_style_aligned.py
+++ b/main/pipeline_sdxl_style_aligned.py
@@ -1480,7 +1480,7 @@ class StyleAlignedSDXLPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stable_diffusion_3_differential_img2img.py b/main/pipeline_stable_diffusion_3_differential_img2img.py
index 693485d1758decf2d6042278fea8fcd57429d511..643386232bc3d6797fc6db385efd3c374788f7ab 100644
--- a/main/pipeline_stable_diffusion_3_differential_img2img.py
+++ b/main/pipeline_stable_diffusion_3_differential_img2img.py
@@ -748,7 +748,7 @@ class StableDiffusion3DifferentialImg2ImgPipeline(DiffusionPipeline):
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stable_diffusion_3_instruct_pix2pix.py b/main/pipeline_stable_diffusion_3_instruct_pix2pix.py
index 6923db23a6d36c8551e4ebab34ec04688585bbff..d9cee800e8ad47a6ed98ccd021fbf32b38e058db 100644
--- a/main/pipeline_stable_diffusion_3_instruct_pix2pix.py
+++ b/main/pipeline_stable_diffusion_3_instruct_pix2pix.py
@@ -945,7 +945,7 @@ class StableDiffusion3InstructPix2PixPipeline(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stable_diffusion_xl_attentive_eraser.py b/main/pipeline_stable_diffusion_xl_attentive_eraser.py
index ab8064c6e378a9916002f7d26135d462ad970921..a881814c2a9100b39a5ecb65868d89be96c7930a 100644
--- a/main/pipeline_stable_diffusion_xl_attentive_eraser.py
+++ b/main/pipeline_stable_diffusion_xl_attentive_eraser.py
@@ -1786,7 +1786,7 @@ class StableDiffusionXL_AE_Pipeline(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/pipeline_stable_diffusion_xl_controlnet_adapter.py b/main/pipeline_stable_diffusion_xl_controlnet_adapter.py
index ccf1098c614cc5a5e0d4edcdf92e784ee634cc18..564a19e923d23f3c5a61b1fc729c552a37bbe1b8 100644
--- a/main/pipeline_stable_diffusion_xl_controlnet_adapter.py
+++ b/main/pipeline_stable_diffusion_xl_controlnet_adapter.py
@@ -973,7 +973,7 @@ class StableDiffusionXLControlNetAdapterPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py b/main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
index 38db19148d43fde2c6e4811175bd8d9f2139fcea..c73433b20f88ddf3971ab7dbf6439ee51878b7f0 100644
--- a/main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
+++ b/main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
@@ -1329,7 +1329,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stable_diffusion_xl_differential_img2img.py b/main/pipeline_stable_diffusion_xl_differential_img2img.py
index b9f00cb82d838740e09a08b3aec082fcb85a82d1..89388e10cb193d5a64e5276f52962b8b15f0acb1 100644
--- a/main/pipeline_stable_diffusion_xl_differential_img2img.py
+++ b/main/pipeline_stable_diffusion_xl_differential_img2img.py
@@ -1053,7 +1053,7 @@ class StableDiffusionXLDifferentialImg2ImgPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stable_diffusion_xl_ipex.py b/main/pipeline_stable_diffusion_xl_ipex.py
index eda6089f594fd474a9fc3dc937e0233e928cd907..aa2b24f3965ac5e1d8d2ff9ba5d8489b2855e104 100644
--- a/main/pipeline_stable_diffusion_xl_ipex.py
+++ b/main/pipeline_stable_diffusion_xl_ipex.py
@@ -832,7 +832,7 @@ class StableDiffusionXLPipelineIpex(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stg_cogvideox.py b/main/pipeline_stg_cogvideox.py
index 1c98ae0f6d8ebe31936246b01e6f16500c7bfda8..bdb6aecc30c3034f7a1e8cc31aba4683340e5b75 100644
--- a/main/pipeline_stg_cogvideox.py
+++ b/main/pipeline_stg_cogvideox.py
@@ -632,7 +632,7 @@ class CogVideoXSTGPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stg_ltx.py b/main/pipeline_stg_ltx.py
index f7ccf99e96aebea13d4fd91525b412805759ded1..70069a33f5d9c94aad535b8faa783f7e84fded75 100644
--- a/main/pipeline_stg_ltx.py
+++ b/main/pipeline_stg_ltx.py
@@ -620,7 +620,7 @@ class LTXSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderM
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stg_ltx_image2video.py b/main/pipeline_stg_ltx_image2video.py
index 3b3d2333805d2fd238d6671bfb677ae318a64675..c32805e1419ff7cdec492ecc363bacfbcc3293f2 100644
--- a/main/pipeline_stg_ltx_image2video.py
+++ b/main/pipeline_stg_ltx_image2video.py
@@ -682,7 +682,7 @@ class LTXImageToVideoSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVide
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_stg_mochi.py b/main/pipeline_stg_mochi.py
index b6ab1b192c1e63d3fff41643297a918875e93299..dbe5d2525ad32e6efc1013ff8569013edd4ddc41 100644
--- a/main/pipeline_stg_mochi.py
+++ b/main/pipeline_stg_mochi.py
@@ -603,7 +603,7 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/pipeline_zero1to3.py b/main/pipeline_zero1to3.py
index 0db543b1697c28ca489f077276655c9f7c69298f..9e29566978e8c4bbcc041da420ebbc4d257f5035 100644
--- a/main/pipeline_zero1to3.py
+++ b/main/pipeline_zero1to3.py
@@ -657,7 +657,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/rerender_a_video.py b/main/rerender_a_video.py
index 133c23294395f2e8b414185f0d19b4df9ed8e9d1..78a15a03b0996972d28f5d57bfbc2496d7f1dff9 100644
--- a/main/rerender_a_video.py
+++ b/main/rerender_a_video.py
@@ -656,7 +656,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/run_onnx_controlnet.py b/main/run_onnx_controlnet.py
index 2221fc09dbdecfe2373bafd440e36ee5328a90bb..f0ab2a2b964355159a391ea2e3bfcce538417a8c 100644
--- a/main/run_onnx_controlnet.py
+++ b/main/run_onnx_controlnet.py
@@ -591,7 +591,7 @@ class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/run_tensorrt_controlnet.py b/main/run_tensorrt_controlnet.py
index b9e71724c04649079d64a8164b8dd7e8b0c6da60..e4f1abc83b0bfaa87a229b972eff5b798f671353 100644
--- a/main/run_tensorrt_controlnet.py
+++ b/main/run_tensorrt_controlnet.py
@@ -695,7 +695,7 @@ class TensorRTStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/sd_text2img_k_diffusion.py b/main/sd_text2img_k_diffusion.py
index ab6cf2d9cd3f7f0e8435bb6cd7fc7d6da0e4d990..4d5cea497f8c1f8bbc71c57f6445e52ddf7c5f53 100644
--- a/main/sd_text2img_k_diffusion.py
+++ b/main/sd_text2img_k_diffusion.py
@@ -326,7 +326,7 @@ class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/seed_resize_stable_diffusion.py b/main/seed_resize_stable_diffusion.py
index 3c823012c10208ae6f9803a010cfe3c91e38979e..eafe7572aab51278c1dafc4cce49280f6b91e905 100644
--- a/main/seed_resize_stable_diffusion.py
+++ b/main/seed_resize_stable_diffusion.py
@@ -122,7 +122,7 @@ class SeedResizeStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/stable_diffusion_comparison.py b/main/stable_diffusion_comparison.py
index 36e7dba2de6260f3575965d6ddb49f3f299f340d..22f3b3e0c385cc10036182b9ead86bbe8ba2fa5f 100644
--- a/main/stable_diffusion_comparison.py
+++ b/main/stable_diffusion_comparison.py
@@ -279,7 +279,7 @@ class StableDiffusionComparisonPipeline(DiffusionPipeline, StableDiffusionMixin)
             latents (`torch.Tensor`, optional):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, optional, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/stable_diffusion_controlnet_img2img.py b/main/stable_diffusion_controlnet_img2img.py
index 877464454a618da3d25cf83dcb6389e1edea0b8e..6d8038cfd4ae86887f35cfd682559703e6505687 100644
--- a/main/stable_diffusion_controlnet_img2img.py
+++ b/main/stable_diffusion_controlnet_img2img.py
@@ -670,7 +670,7 @@ class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline, StableDiffusio
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/stable_diffusion_controlnet_inpaint.py b/main/stable_diffusion_controlnet_inpaint.py
index 175c47d015235fd45e5f5771c69fc0ac005c9236..fe7b808b6beb3dbdc02105bb3c27cad8fa43751d 100644
--- a/main/stable_diffusion_controlnet_inpaint.py
+++ b/main/stable_diffusion_controlnet_inpaint.py
@@ -810,7 +810,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline, StableDiffusio
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/stable_diffusion_controlnet_inpaint_img2img.py b/main/stable_diffusion_controlnet_inpaint_img2img.py
index 51e7ac38dd5402eecb977c7a8af7fc8194d4131d..2b5dc77fe5aa8ecedb4f2f8ff332e03a87c167f0 100644
--- a/main/stable_diffusion_controlnet_inpaint_img2img.py
+++ b/main/stable_diffusion_controlnet_inpaint_img2img.py
@@ -804,7 +804,7 @@ class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline, StableD
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/stable_diffusion_controlnet_reference.py b/main/stable_diffusion_controlnet_reference.py
index aa9ab1b2421101bc376c1ed9021bd2b564408822..e5dd249e0424cf0414f985f98187f0dc76684550 100644
--- a/main/stable_diffusion_controlnet_reference.py
+++ b/main/stable_diffusion_controlnet_reference.py
@@ -179,7 +179,7 @@ class StableDiffusionControlNetReferencePipeline(StableDiffusionControlNetPipeli
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/stable_diffusion_ipex.py b/main/stable_diffusion_ipex.py
index 18d5e8feaa43e189de94265ef3ede2518b74547e..7d1cd4f5d09eebbf528f3c74a487d3ae447867cd 100644
--- a/main/stable_diffusion_ipex.py
+++ b/main/stable_diffusion_ipex.py
@@ -615,7 +615,7 @@ class StableDiffusionIPEXPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/stable_diffusion_reference.py b/main/stable_diffusion_reference.py
index 69fa0722cf8a5de034eb95a2fade23beaa13cc36..6f7dce982339ae36418d4902d2c88c806e685dcf 100644
--- a/main/stable_diffusion_reference.py
+++ b/main/stable_diffusion_reference.py
@@ -885,7 +885,7 @@ class StableDiffusionReferencePipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/stable_diffusion_repaint.py b/main/stable_diffusion_repaint.py
index 9f6172f3b838fa0173b24aca415b45df9b9a56a0..94b9f8b01b51ecd44db3db09bc9cae743a8febf5 100644
--- a/main/stable_diffusion_repaint.py
+++ b/main/stable_diffusion_repaint.py
@@ -678,7 +678,7 @@ class StableDiffusionRepaintPipeline(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/stable_diffusion_xl_reference.py b/main/stable_diffusion_xl_reference.py
index 11926a5d9ac9b5093ce8af1a928411eefadd1fea..eb055574966d0ebc02332e8b3fbbaaa741d86046 100644
--- a/main/stable_diffusion_xl_reference.py
+++ b/main/stable_diffusion_xl_reference.py
@@ -380,7 +380,7 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/main/text_inpainting.py b/main/text_inpainting.py
index 2908388029dd8021cea80133f48fde0d0d770e7a..f262cf2cac6d0a15f5da98efba859183318ee80d 100644
--- a/main/text_inpainting.py
+++ b/main/text_inpainting.py
@@ -180,7 +180,7 @@ class TextInpainting(DiffusionPipeline, StableDiffusionMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/main/tiled_upscaling.py b/main/tiled_upscaling.py
index 56eb3e89b5d02362b39c3f624af989f2789aaa4d..7a5e77155cd0b82363fcbb89c5c72fb0250b29d5 100644
--- a/main/tiled_upscaling.py
+++ b/main/tiled_upscaling.py
@@ -231,7 +231,7 @@ class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             tile_size (`int`, *optional*):
                 The size of the tiles. Too big can result in an OOM-error.
             tile_border (`int`, *optional*):
diff --git a/main/wildcard_stable_diffusion.py b/main/wildcard_stable_diffusion.py
index c750610ca34fa3cb3bc07aca8001b183e6f47226..d40221e5b1cffd7d5b14e8963df82f94139f100d 100644
--- a/main/wildcard_stable_diffusion.py
+++ b/main/wildcard_stable_diffusion.py
@@ -209,7 +209,7 @@ class WildcardStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
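
The corrected docstrings all describe the same optional `latents`/`generator` behavior: if `latents` is omitted, the pipeline samples it from the supplied `generator`; if it is passed, the same starting noise can be reused across prompts. A minimal sketch of that usage with the public `diffusers` text-to-image API is below (the model id, seed, and 512x512 resolution are illustrative, not part of this patch):

```python
# Sketch: reuse one pre-generated latents tensor across two prompts so the
# generations share the same starting noise. Assumes a CUDA device is available.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # illustrative checkpoint
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)  # illustrative seed
# Latent shape is (batch, unet input channels, height / 8, width / 8) for SD 1.x.
shape = (1, pipe.unet.config.in_channels, 512 // 8, 512 // 8)
latents = torch.randn(shape, generator=generator, device="cuda", dtype=torch.float16)

# Same pre-generated noise, different prompts -> comparable compositions.
image_a = pipe("a watercolor painting of a fox", latents=latents).images[0]
image_b = pipe("an oil painting of an owl", latents=latents).images[0]

# Omitting `latents` and passing only `generator` instead lets the pipeline
# sample the noise internally, which is the default path the docstrings describe.
image_c = pipe("a watercolor painting of a fox", generator=generator).images[0]
```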