# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EMOVA model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.models.auto import CONFIG_MAPPING
from .configuration_qwen2vit import Qwen2VLVisionConfig
logger = logging.get_logger(__name__)
class EMOVAConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`EMOVAForConditionalGeneration`]. It is used to instantiate an
EMOVA model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
        vision_config (`Union[Qwen2VLVisionConfig, dict]`, *optional*, defaults to `Qwen2VLVisionConfig`):
            The config object or dictionary of the vision backbone.
        mm_projector_config (`dict`, *optional*):
            The config dictionary of the multimodal projector that maps vision features into the text embedding space.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
            The config object or dictionary of the text backbone.
ignore_index (`int`, *optional*, defaults to -100):
The ignore index for the loss function.
image_token_index (`int`, *optional*, defaults to 32000):
The image token index to encode the image prompt.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
        vision_feature_layer (`int`, *optional*, defaults to -2):
            The index of the layer to select the vision feature.
        is_native_resolution (`bool`, *optional*, defaults to `True`):
            Whether the vision backbone processes images at their native resolution. When `True`,
            `image_grid_pinpoints` is ignored.
        image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
            A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple
            or list of the form `(height, width)`. Only used when `is_native_resolution` is `False`.
        use_image_newline_parameter (`bool`, *optional*, defaults to `False`):
            Whether to use the `image_newline` parameter when packing image features.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
Example:
```python
    >>> from transformers import EMOVAForConditionalGeneration, EMOVAConfig, LlamaConfig

    >>> # Initializing a Llama config for the text backbone
    >>> text_config = LlamaConfig()

    >>> # Initializing a EMOVA style configuration. The vision backbone defaults to a Qwen2-VL
    >>> # vision config when `vision_config` is not given.
    >>> configuration = EMOVAConfig(text_config=text_config)

    >>> # Initializing a model from the configuration
    >>> model = EMOVAForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
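
    >>> # The sub-configs may also be passed as plain dicts; `__init__` converts them with
    >>> # `Qwen2VLVisionConfig(**vision_config)` and `CONFIG_MAPPING[text_config["model_type"]]`.
    >>> configuration = EMOVAConfig(
    ...     vision_config={"model_type": "qwen2_vl", "depth": 32, "embed_dim": 1280, "hidden_size": 3584},
    ...     text_config={"model_type": "llama"},
    ... )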
```"""
    model_type = "emova"
    is_composition = False

    def __init__(
self,
vision_config=None,
mm_projector_config=None,
text_config=None,
ignore_index=-100,
image_token_index=32000,
vision_feature_select_strategy="default",
vision_feature_layer=-2,
is_native_resolution=True,
image_grid_pinpoints=None,
use_image_newline_parameter=False,
tie_word_embeddings=False,
**kwargs,
):
self.ignore_index = ignore_index
self.image_token_index = image_token_index
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer

        image_grid_pinpoints = (
            image_grid_pinpoints
            if image_grid_pinpoints is not None
            else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
        )
        self.is_native_resolution = is_native_resolution
        # Native-resolution (Qwen2-VL style) processing does not use grid pinpoints.
        self.image_grid_pinpoints = image_grid_pinpoints if not self.is_native_resolution else None
        self.use_image_newline_parameter = use_image_newline_parameter
if isinstance(vision_config, dict):
vision_config = Qwen2VLVisionConfig(**vision_config)
elif vision_config is None:
vision_config = Qwen2VLVisionConfig(
depth=32,
embed_dim=1280,
hidden_act="quick_gelu",
hidden_size=3584,
in_channels=3,
in_chans=3,
mlp_ratio=4,
model_type="qwen2_vl",
num_heads=16,
patch_size=14,
spatial_merge_size=2,
spatial_patch_size=14,
temporal_patch_size=2,
initializer_range=0.02,
)
self.vision_config = vision_config
self.mm_projector_config = mm_projector_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
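

# Minimal usage sketch (illustrative only; assumes `transformers` is installed). It exercises the
# defaults chosen by `__init__` above: a Qwen2-VL style vision config and a Llama text config.
if __name__ == "__main__":
    config = EMOVAConfig()
    print(config.model_type)                 # "emova"
    print(config.vision_config.hidden_size)  # 3584, from the Qwen2-VL vision defaults
    print(config.text_config.model_type)     # "llama"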