import importlib
import inspect
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

import diffusers
import PIL
from huggingface_hub import snapshot_download
from packaging import version
from PIL import Image
from tqdm.auto import tqdm

from .configuration_utils import ConfigMixin
from .dynamic_modules_utils import get_class_from_dynamic_module
from .hub_utils import http_user_agent
from .modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT
from .schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from .utils import (
    CONFIG_NAME,
    DIFFUSERS_CACHE,
    ONNX_WEIGHTS_NAME,
    WEIGHTS_NAME,
    BaseOutput,
    deprecate,
    is_accelerate_available,
    is_torch_version,
    is_transformers_available,
    logging,
)


if is_transformers_available():
    import transformers
    from transformers import PreTrainedModel


INDEX_FILE = "diffusion_pytorch_model.bin"
CUSTOM_PIPELINE_FILE_NAME = "pipeline.py"
DUMMY_MODULES_FOLDER = "diffusers.utils"
TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils"


logger = logging.get_logger(__name__)


# Mapping from library name to the classes that can be (de-)serialized as pipeline components, together with
# their `[save_method, load_method]` names.
LOADABLE_CLASSES = {
    "diffusers": {
        "ModelMixin": ["save_pretrained", "from_pretrained"],
        "SchedulerMixin": ["save_pretrained", "from_pretrained"],
        "DiffusionPipeline": ["save_pretrained", "from_pretrained"],
        "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"],
    },
    "transformers": {
        "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
        "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
        "PreTrainedModel": ["save_pretrained", "from_pretrained"],
        "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"],
        "ProcessorMixin": ["save_pretrained", "from_pretrained"],
        "ImageProcessingMixin": ["save_pretrained", "from_pretrained"],
    },
    "onnxruntime.training": {
        "ORTModule": ["save_pretrained", "from_pretrained"],
    },
}

# Flattened view over all libraries: class name -> [save_method, load_method].
ALL_IMPORTABLE_CLASSES = {}
for library in LOADABLE_CLASSES:
    ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])
|
|
@dataclass
class ImagePipelineOutput(BaseOutput):
    """
    Output class for image pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or a numpy array of shape `(batch_size, height,
            width, num_channels)` representing the denoised images of the diffusion pipeline.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
|
|
@dataclass
class AudioPipelineOutput(BaseOutput):
    """
    Output class for audio pipelines.

    Args:
        audios (`np.ndarray`):
            Numpy array of shape `(batch_size, num_channels, sample_length)` representing the denoised audio
            samples of the diffusion pipeline.
    """

    audios: np.ndarray
|
|
class DiffusionPipeline(ConfigMixin):
    r"""
    Base class for all pipelines.

    [`DiffusionPipeline`] takes care of storing all components (models, schedulers, processors) for diffusion
    pipelines and handles methods for loading, downloading and saving models, as well as a few methods common to
    all pipelines to:

        - move all PyTorch modules to the device of your choice
        - enable or disable the progress bar for the denoising iteration

    Class attributes:

        - **config_name** (`str`) -- name of the config file that will store the class and module names of all
          components of the diffusion pipeline.
    """
    config_name = "model_index.json"
|
    def register_modules(self, **kwargs):
        r"""
        Register the passed components as attributes on the pipeline and record their library and class names in
        the pipeline config, so that [`~DiffusionPipeline.save_pretrained`] and
        [`~DiffusionPipeline.from_pretrained`] know how to serialize and restore them.
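
        Example (an illustrative sketch; `unet` and `scheduler` stand for whatever components a concrete pipeline
        subclass actually defines):

        ```py
        >>> # inside a custom pipeline's `__init__`
        >>> self.register_modules(unet=unet, scheduler=scheduler)
        ```
        """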
        from diffusers import pipelines

        for name, module in kwargs.items():
            if module is None:
                register_dict = {name: (None, None)}
            else:
                # retrieve the library the module comes from, e.g. `diffusers` or `transformers`
                library = module.__module__.split(".")[0]

                # check whether the module comes from a pipeline module, e.g. `diffusers.pipelines.ddpm`
                pipeline_dir = module.__module__.split(".")[-2] if len(module.__module__.split(".")) > 2 else None
                path = module.__module__.split(".")
                is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)

                # if the library is not in LOADABLE_CLASSES, the module is pipeline-specific, so the pipeline
                # folder name is recorded as the library instead
                if library not in LOADABLE_CLASSES or is_pipeline_module:
                    library = pipeline_dir

                class_name = module.__class__.__name__

                register_dict = {name: (library, class_name)}

            # save the (library, class_name) tuple in the pipeline config
            self.register_to_config(**register_dict)

            # set the module as an attribute, e.g. `pipeline.unet`
            setattr(self, name, module)
|
    def save_pretrained(self, save_directory: Union[str, os.PathLike]):
        """
        Save all saveable variables of the pipeline, as well as the pipeline's configuration file, to a directory.
        A pipeline variable can be saved and loaded if its class implements both a save and a load method. The
        pipeline can then easily be re-loaded using the [`~DiffusionPipeline.from_pretrained`] class method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
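
        Example (an illustrative sketch; assumes a pipeline has already been loaded):

        ```py
        >>> from diffusers import DiffusionPipeline

        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        >>> pipeline.save_pretrained("./my_pipeline_directory")
        >>> # the saved directory can be re-loaded
        >>> pipeline = DiffusionPipeline.from_pretrained("./my_pipeline_directory")
        ```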
        """
        self.save_config(save_directory)

        model_index_dict = dict(self.config)
        model_index_dict.pop("_class_name")
        model_index_dict.pop("_diffusers_version")
        model_index_dict.pop("_module", None)

        for pipeline_component_name in model_index_dict.keys():
            sub_model = getattr(self, pipeline_component_name)
            if sub_model is None:
                # components that were registered as `None` (e.g. a disabled optional component) cannot be saved
                continue

            model_cls = sub_model.__class__

            # search LOADABLE_CLASSES for a base class of the component to determine its save method
            save_method_name = None
            for library_name, library_classes in LOADABLE_CLASSES.items():
                library = importlib.import_module(library_name)
                for base_class, save_load_methods in library_classes.items():
                    class_candidate = getattr(library, base_class, None)
                    if class_candidate is not None and issubclass(model_cls, class_candidate):
                        # if we found a suitable base class in LOADABLE_CLASSES then grab its save method
                        save_method_name = save_load_methods[0]
                        break
                if save_method_name is not None:
                    break

            save_method = getattr(sub_model, save_method_name)
            save_method(os.path.join(save_directory, pipeline_component_name))
|
    def to(self, torch_device: Optional[Union[str, torch.device]] = None):
        r"""
        Move all `torch.nn.Module` components of the pipeline to `torch_device` and return the pipeline. If
        `torch_device` is `None`, the pipeline is returned unchanged.
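
        Example (an illustrative sketch; assumes a CUDA device is available):

        ```py
        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        >>> pipeline = pipeline.to("cuda")
        ```
        """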
        if torch_device is None:
            return self

        module_names, _, _ = self.extract_init_dict(dict(self.config))
        for name in module_names.keys():
            module = getattr(self, name)
            if isinstance(module, torch.nn.Module):
                if module.dtype == torch.float16 and str(torch_device) in ["cpu"]:
                    logger.warning(
                        "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` device. It"
                        " is not recommended to move them to `cpu` as running them will fail. Please make"
                        " sure to use an accelerator to run the pipeline in inference, due to the lack of"
                        " support for `float16` operations on this device in PyTorch. Please remove the"
                        " `torch_dtype=torch.float16` argument, or use another device for inference."
                    )
                module.to(torch_device)
        return self
|
    @property
    def device(self) -> torch.device:
        r"""
        Returns:
            `torch.device`: The torch device on which the pipeline is located.
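
        Example (an illustrative sketch; a freshly loaded pipeline typically lives on CPU):

        ```py
        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        >>> pipeline.device
        device(type='cpu')
        ```
        """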
        module_names, _, _ = self.extract_init_dict(dict(self.config))
        for name in module_names.keys():
            module = getattr(self, name)
            if isinstance(module, torch.nn.Module):
                return module.device
        return torch.device("cpu")
|
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Instantiate a PyTorch diffusion pipeline from pre-trained pipeline weights.

        The pipeline is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not
        come pretrained with the rest of the model. It is up to you to train those weights with a downstream
        fine-tuning task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:

                    - A string, the *repo id* of a pretrained pipeline hosted inside a model repo on
                      https://huggingface.co/. Valid repo ids have to be located under a user or organization name,
                      like `CompVis/ldm-text2im-large-256`.
                    - A path to a *directory* containing pipeline weights saved using
                      [`~DiffusionPipeline.save_pretrained`], e.g., `./my_pipeline_directory/`.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the
                dtype will be automatically derived from the model's weights.
            custom_pipeline (`str`, *optional*):

                <Tip warning={true}>

                This is an experimental feature and is likely to change in the future.

                </Tip>

                Can be either:

                    - A string, the *repo id* of a custom pipeline hosted inside a model repo on
                      https://huggingface.co/. Valid repo ids have to be located under a user or organization name,
                      like `hf-internal-testing/diffusers-dummy-pipeline`.

                      <Tip>

                      It is required that the model repo has a file called `pipeline.py` that defines the custom
                      pipeline.

                      </Tip>

                    - A string, the *file name* of a community pipeline hosted on GitHub under
                      https://github.com/huggingface/diffusers/tree/main/examples/community. Valid file names have
                      to exactly match the file name without `.py` located under the above link, *e.g.*
                      `clip_guided_stable_diffusion`.

                      <Tip>

                      Community pipelines are always loaded from the current `main` branch of GitHub.

                      </Tip>

                    - A path to a *directory* containing a custom pipeline, e.g., `./my_pipeline_directory/`.

                      <Tip>

                      It is required that the directory has a file called `pipeline.py` that defines the custom
                      pipeline.

                      </Tip>

                For more information on how to load and create custom pipelines, please have a look at [Loading
                and Adding Custom
                Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview)
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding
                the cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such
                a file exists.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error
                messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token
                generated when running `huggingface-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we
                use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can
                be any identifier allowed by git.
            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or
                safety of the mirror. Please refer to the mirror site for more information.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to
                the same device.

                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`.
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by not initializing the weights and only loading the pre-trained weights.
                This also tries to not use more than 1x model size in CPU memory (including peak memory) while
                loading the model. This is only supported when torch version >= 1.9.0. If you are using an older
                version of torch, setting this argument to `True` will raise an error.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load- and saveable variables, *i.e.* the pipeline components, of the
                specific pipeline class. The overwritten components are then directly passed to the pipeline's
                `__init__` method. See example below for more information.

        <Tip>

        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
        models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.*
        `"runwayml/stable-diffusion-v1-5"`

        </Tip>

        <Tip>

        Activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to
        use this method in a firewalled environment.

        </Tip>

        Examples:

        ```py
        >>> from diffusers import DiffusionPipeline

        >>> # Download pipeline from huggingface.co and cache.
        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")

        >>> # Download pipeline that requires an authorization token
        >>> # For more information on access tokens, please refer to [this section of the
        >>> # documentation](https://huggingface.co/docs/hub/security-tokens)
        >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

        >>> # Use a different scheduler
        >>> from diffusers import LMSDiscreteScheduler

        >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
        >>> pipeline.scheduler = scheduler
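
        >>> # Illustrative: load a community pipeline by its file name (without `.py`) from
        >>> # https://github.com/huggingface/diffusers/tree/main/examples/community (fetched from `main`)
        >>> pipeline = DiffusionPipeline.from_pretrained(
        ...     "google/ddpm-cifar10-32", custom_pipeline="one_step_unet"
        ... )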
        ```
        """
|
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        torch_dtype = kwargs.pop("torch_dtype", None)
        custom_pipeline = kwargs.pop("custom_pipeline", None)
        provider = kwargs.pop("provider", None)
        sess_options = kwargs.pop("sess_options", None)
        device_map = kwargs.pop("device_map", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)

        if low_cpu_mem_usage and not is_accelerate_available():
            low_cpu_mem_usage = False
            logger.warning(
                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
                " install accelerate\n```\n."
            )
|
        if device_map is not None and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `device_map=None`."
            )

        if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `low_cpu_mem_usage=False`."
            )

        if low_cpu_mem_usage is False and device_map is not None:
            raise ValueError(
                f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and"
                " dispatching. Please make sure to set `low_cpu_mem_usage=True`."
            )
|
        # 1. Download the checkpoints and configs
        if not os.path.isdir(pretrained_model_name_or_path):
            config_dict = cls.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
            )

            # make sure we only download sub-folders and the `diffusers` filenames
            folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
            allow_patterns = [os.path.join(k, "*") for k in folder_names]
            allow_patterns += [WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, ONNX_WEIGHTS_NAME, cls.config_name]

            # make sure we don't download flax weights
            ignore_patterns = "*.msgpack"

            if custom_pipeline is not None:
                allow_patterns += [CUSTOM_PIPELINE_FILE_NAME]

            if cls != DiffusionPipeline:
                requested_pipeline_class = cls.__name__
            else:
                requested_pipeline_class = config_dict.get("_class_name", cls.__name__)
            user_agent = {"pipeline_class": requested_pipeline_class}
            if custom_pipeline is not None:
                user_agent["custom_pipeline"] = custom_pipeline
            user_agent = http_user_agent(user_agent)

            # download all files matching `allow_patterns`
            cached_folder = snapshot_download(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                allow_patterns=allow_patterns,
                ignore_patterns=ignore_patterns,
                user_agent=user_agent,
            )
        else:
            cached_folder = pretrained_model_name_or_path

        config_dict = cls.load_config(cached_folder)

        # 2. Load the pipeline class; a custom pipeline is loaded as a dynamic module, otherwise the class is
        # resolved from the config
        if custom_pipeline is not None:
            if custom_pipeline.endswith(".py"):
                path = Path(custom_pipeline)
                # decompose into folder & file name
                file_name = path.name
                custom_pipeline = path.parent.absolute()
            else:
                file_name = CUSTOM_PIPELINE_FILE_NAME

            pipeline_class = get_class_from_dynamic_module(
                custom_pipeline, module_file=file_name, cache_dir=custom_pipeline
            )
        elif cls != DiffusionPipeline:
            pipeline_class = cls
        else:
            diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
            pipeline_class = getattr(diffusers_module, config_dict["_class_name"])
|
        # backwards compatibility for legacy Stable Diffusion inpainting checkpoints (to be removed in 1.0.0)
        if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse(
            version.parse(config_dict["_diffusers_version"]).base_version
        ) <= version.parse("0.5.1"):
            from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy

            pipeline_class = StableDiffusionInpaintPipelineLegacy

            deprecation_message = (
                "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the"
                f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For"
                " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting"
                " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your"
                f" checkpoint {pretrained_model_name_or_path} to the format of"
                " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain"
                f" the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0."
            )
            deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False)
|
        # some modules can be passed directly to the init; in that case they are already instantiated in `kwargs`
        # and must not be loaded again
        expected_modules = set(inspect.signature(pipeline_class.__init__).parameters.keys()) - {"self"}
        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}

        init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)

        if len(unused_kwargs) > 0:
            logger.warning(f"Keyword arguments {unused_kwargs} not recognized.")

        init_kwargs = {}

        # import here to avoid a circular import
        from diffusers import pipelines

        # 3. Load each module in the pipeline
        for name, (library_name, class_name) in init_dict.items():
            if class_name is None:
                # the component was explicitly disabled in the config
                init_kwargs[name] = None
                continue

            # Flax class names start with "Flax"; strip the prefix to get the PyTorch equivalent
            if class_name.startswith("Flax"):
                class_name = class_name[4:]

            is_pipeline_module = hasattr(pipelines, library_name)
            loaded_sub_model = None
            sub_model_should_be_defined = True

            if name in passed_class_obj:
                # the component was passed directly by the user; validate its type where possible
                if not is_pipeline_module and passed_class_obj[name] is not None:
                    library = importlib.import_module(library_name)
                    class_obj = getattr(library, class_name)
                    importable_classes = LOADABLE_CLASSES[library_name]
                    class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}

                    expected_class_obj = None
                    for class_candidate_name, class_candidate in class_candidates.items():
                        if class_candidate is not None and issubclass(class_obj, class_candidate):
                            expected_class_obj = class_candidate

                    if not issubclass(passed_class_obj[name].__class__, expected_class_obj):
                        raise ValueError(
                            f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be"
                            f" {expected_class_obj}"
                        )
                elif passed_class_obj[name] is None:
                    logger.warning(
                        f"You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note"
                        f" that this might lead to problems when using {pipeline_class} and is not recommended."
                    )
                    sub_model_should_be_defined = False
                else:
                    logger.warning(
                        f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether"
                        " it has the correct type"
                    )

                # set the passed object as the sub-model
                loaded_sub_model = passed_class_obj[name]
            elif is_pipeline_module:
                pipeline_module = getattr(pipelines, library_name)
                class_obj = getattr(pipeline_module, class_name)
                importable_classes = ALL_IMPORTABLE_CLASSES
                class_candidates = {c: class_obj for c in importable_classes.keys()}
            else:
                # otherwise just import the class from the library
                library = importlib.import_module(library_name)

                class_obj = getattr(library, class_name)
                importable_classes = LOADABLE_CLASSES[library_name]
                class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}

            if loaded_sub_model is None and sub_model_should_be_defined:
                # determine the load method from the first matching base class in `importable_classes`
                load_method_name = None
                for class_candidate_name, class_candidate in class_candidates.items():
                    if class_candidate is not None and issubclass(class_obj, class_candidate):
                        load_method_name = importable_classes[class_candidate_name][1]

                if load_method_name is None:
                    none_module = class_obj.__module__
                    is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith(
                        TRANSFORMERS_DUMMY_MODULES_FOLDER
                    )
                    if is_dummy_path and "dummy" in none_module:
                        # instantiate the dummy object to raise a helpful error about missing requirements
                        class_obj()

                    raise ValueError(
                        f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have"
                        f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}."
                    )

                load_method = getattr(class_obj, load_method_name)
                loading_kwargs = {}

                if issubclass(class_obj, torch.nn.Module):
                    loading_kwargs["torch_dtype"] = torch_dtype
                if issubclass(class_obj, diffusers.OnnxRuntimeModel):
                    loading_kwargs["provider"] = provider
                    loading_kwargs["sess_options"] = sess_options

                is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin)
                is_transformers_model = (
                    is_transformers_available()
                    and issubclass(class_obj, PreTrainedModel)
                    and version.parse(version.parse(transformers.__version__).base_version) >= version.parse("4.20.0")
                )

                # only pass `device_map`/`low_cpu_mem_usage` to models that support them: diffusers models, and
                # transformers models from v4.20.0 on
                if is_diffusers_model or is_transformers_model:
                    loading_kwargs["device_map"] = device_map
                    loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage

                # check if the module is in a subdirectory
                if os.path.isdir(os.path.join(cached_folder, name)):
                    loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
                else:
                    # else load from the root directory
                    loaded_sub_model = load_method(cached_folder, **loading_kwargs)

            init_kwargs[name] = loaded_sub_model
|
        # 4. Potentially add passed objects if expected
        missing_modules = set(expected_modules) - set(init_kwargs.keys())
        if len(missing_modules) > 0 and missing_modules <= set(passed_class_obj.keys()):
            for module in missing_modules:
                init_kwargs[module] = passed_class_obj[module]
        elif len(missing_modules) > 0:
            passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys()))
            raise ValueError(
                f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed."
            )

        # 5. Instantiate the pipeline
        model = pipeline_class(**init_kwargs)
        return model
|
    @property
    def components(self) -> Dict[str, Any]:
        r"""
        The `self.components` property can be useful to run different pipelines with the same weights and
        configurations to not have to re-allocate memory.

        Examples:

        ```py
        >>> from diffusers import (
        ...     StableDiffusionPipeline,
        ...     StableDiffusionImg2ImgPipeline,
        ...     StableDiffusionInpaintPipeline,
        ... )

        >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
        >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)
        ```

        Returns:
            A dictionary containing all the modules needed to initialize the pipeline.
        """
        components = {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
        expected_modules = set(inspect.signature(self.__init__).parameters.keys()) - {"self"}

        if set(components.keys()) != expected_modules:
            raise ValueError(
                f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected"
                f" {expected_modules} to be defined, but {components} are defined."
            )

        return components
|
    @staticmethod
    def numpy_to_pil(images):
        """
        Convert a numpy image or a batch of images to a list of PIL images.
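
        Example (an illustrative sketch; assumes float images in `[0, 1]` with channels last):

        ```py
        >>> import numpy as np

        >>> images = np.random.rand(2, 64, 64, 3)  # two random RGB images
        >>> pil_images = DiffusionPipeline.numpy_to_pil(images)
        ```
        """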
        if images.ndim == 3:
            # a single image: add a batch dimension
            images = images[None, ...]
        images = (images * 255).round().astype("uint8")
        if images.shape[-1] == 1:
            # special case for grayscale (single channel) images
            pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
        else:
            pil_images = [Image.fromarray(image) for image in images]

        return pil_images
|
    def progress_bar(self, iterable):
        r"""
        Wrap `iterable` in a `tqdm` progress bar, configured with any options previously set via
        [`~DiffusionPipeline.set_progress_bar_config`].
        """
        if not hasattr(self, "_progress_bar_config"):
            self._progress_bar_config = {}
        elif not isinstance(self._progress_bar_config, dict):
            raise ValueError(
                f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
            )

        return tqdm(iterable, **self._progress_bar_config)
|
    def set_progress_bar_config(self, **kwargs):
        r"""
        Store keyword arguments that are forwarded to `tqdm.auto.tqdm` for every progress bar created by
        [`~DiffusionPipeline.progress_bar`].
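
        Example (an illustrative sketch; any valid `tqdm` keyword argument can be passed):

        ```py
        >>> pipeline.set_progress_bar_config(disable=True)  # silence the denoising progress bar
        ```
        """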
        self._progress_bar_config = kwargs