leepanic committed
Commit c082f60 · verified · 1 Parent(s): 9ffd10a

Upload processor

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<PAD>": 32000
+ }
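
added_tokens.json registers a single extra token, `<PAD>`, at id 32000 (one past the 32,000-token base vocabulary of the `LlamaTokenizer` configured later in this commit). A minimal sketch of what this implies when the tokenizer from this repo is loaded; the repo id is a placeholder, not taken from this commit:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/prismatic-vlm")  # hypothetical repo id for this upload
assert tok.convert_tokens_to_ids("<PAD>") == 32000        # id declared in added_tokens.json
assert tok.pad_token == "<PAD>"                           # wired up via special_tokens_map.json below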
preprocessor_config.json ADDED
@@ -0,0 +1,114 @@
+ {
+   "auto_map": {
+     "AutoImageProcessor": "processing_prismatic.PrismaticImageProcessor",
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "image_processor_type": "PrismaticImageProcessor",
+   "image_resize_strategy": "resize-naive",
+   "input_sizes": [
+     [
+       3,
+       224,
+       224
+     ],
+     [
+       3,
+       224,
+       224
+     ]
+   ],
+   "interpolations": [
+     "bicubic",
+     "bicubic"
+   ],
+   "means": [
+     [
+       0.485,
+       0.456,
+       0.406
+     ],
+     [
+       0.5,
+       0.5,
+       0.5
+     ]
+   ],
+   "processor_class": "PrismaticProcessor",
+   "stds": [
+     [
+       0.229,
+       0.224,
+       0.225
+     ],
+     [
+       0.5,
+       0.5,
+       0.5
+     ]
+   ],
+   "tvf_crop_params": [
+     {
+       "output_size": [
+         224,
+         224
+       ]
+     },
+     {
+       "output_size": [
+         224,
+         224
+       ]
+     }
+   ],
+   "tvf_do_letterbox": false,
+   "tvf_letterbox_fill": null,
+   "tvf_normalize_params": [
+     {
+       "inplace": false,
+       "mean": [
+         0.484375,
+         0.455078125,
+         0.40625
+       ],
+       "std": [
+         0.228515625,
+         0.2236328125,
+         0.224609375
+       ]
+     },
+     {
+       "inplace": false,
+       "mean": [
+         0.5,
+         0.5,
+         0.5
+       ],
+       "std": [
+         0.5,
+         0.5,
+         0.5
+       ]
+     }
+   ],
+   "tvf_resize_params": [
+     {
+       "antialias": true,
+       "interpolation": 3,
+       "max_size": null,
+       "size": [
+         224,
+         224
+       ]
+     },
+     {
+       "antialias": true,
+       "interpolation": 3,
+       "max_size": null,
+       "size": [
+         224,
+         224
+       ]
+     }
+   ],
+   "use_fused_vision_backbone": true
+ }
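
Because `auto_map` points at classes defined in `processing_prismatic.py` (shipped in this repo rather than in `transformers`), loading this config requires `trust_remote_code=True`. A rough sketch, with the repo id as a placeholder:

from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained(
    "org/prismatic-vlm",     # hypothetical repo id for this upload
    trust_remote_code=True,  # lets auto_map resolve processing_prismatic.PrismaticImageProcessor
)

With `use_fused_vision_backbone: true` and two (3, 224, 224) input streams, `preprocess` should return `pixel_values` with the two normalized views stacked channel-wise, i.e. 6 channels per image.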
processing_prismatic.py ADDED
@@ -0,0 +1,252 @@
+ """
+ processing_prismatic.py
+
+ HuggingFace-style preprocessor definitions for Prismatic VLMs, inheriting from `ProcessorMixin`. Default configuration
+ specifies `siglip-224px+7b`.
+ """
+
+ from typing import Any, ClassVar, List, Optional, Tuple, Union
+
+ import timm.data
+ import torch
+ import torchvision.transforms.functional as TVF
+ from PIL import Image
+ from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
+ from transformers import PreTrainedTokenizerBase
+ from transformers.image_processing_utils import BatchFeature, ImageProcessingMixin
+ from transformers.processing_utils import ProcessorMixin
+ from transformers.tokenization_utils import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+ from transformers.utils import TensorType
+
+
+ # === Image Processing ===
+ def letterbox_pad_transform(image: Image.Image, padding_fill_value: Tuple[int, int, int]) -> Image.Image:
+     """Given a PIL.Image, pad to square by adding a symmetric border around the height/width."""
+     (w, h), max_wh = image.size, max(image.size)
+     horizontal_pad, vertical_pad = int((max_wh - w) / 2), int((max_wh - h) / 2)
+     padding = (horizontal_pad, vertical_pad, horizontal_pad, vertical_pad)
+
+     return TVF.pad(image, padding, fill=padding_fill_value, padding_mode="constant")
+
+
+ class PrismaticImageProcessor(ImageProcessingMixin):
+     model_input_names: ClassVar[List[str]] = ["pixel_values"]
+
+     def __init__(
+         self,
+         use_fused_vision_backbone: bool = False,
+         image_resize_strategy: str = "letterbox",
+         input_sizes: Optional[List[Tuple[int, int, int]]] = None,
+         interpolations: Optional[List[str]] = None,
+         means: Optional[List[Tuple[float, float, float]]] = None,
+         stds: Optional[List[Tuple[float, float, float]]] = None,
+         **kwargs: str,
+     ) -> None:
+         """
+         Initialize a PrismaticImageProcessor as a wrapper around a torchvision transform; this transform will be
+         created by TIMM, and edited to follow our custom `image_resize_strategy` logic.
+         @param use_fused_vision_backbone: Boolean indicating single or fused (dual) vision backbone
+         @param image_resize_strategy: Prismatic image resize strategy in < resize-naive | resize-crop | letterbox >
+         @param input_sizes: [TIMM :: `data_cfg`] Input image size(s) as tuple (channels, width, height)
+         @param interpolations: [TIMM :: `data_cfg`] Interpolation(s) as string (default: "bicubic")
+         @param means: [TIMM :: `data_cfg`] Normalization mean(s) as float tuple (or two-tuple if `fused_backbone`)
+         @param stds: [TIMM :: `data_cfg`] Normalization std(s) as float tuple (or two-tuple if `fused_backbone`)
+         """
+         self.use_fused_vision_backbone = use_fused_vision_backbone
+         self.image_resize_strategy = image_resize_strategy
+
+         # Handle `None` default values
+         input_sizes = [(3, 224, 224)] if input_sizes is None else input_sizes
+         means = [(0.5, 0.5, 0.5)] if means is None else means
+         stds = [(0.5, 0.5, 0.5)] if stds is None else stds
+
+         # TIMM `data_cfg` Parameters
+         self.input_sizes, self.interpolations, self.means, self.stds = input_sizes, interpolations, means, stds
+
+         # Grab torchvision transforms via TIMM =>> need to parse for specific "functional" transform values!
+         self.tvf_resize_params, self.tvf_crop_params, self.tvf_normalize_params = [], [], []
+         self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
+
+         for idx in range(len(input_sizes)):
+             transform = timm.data.create_transform(
+                 input_size=self.input_sizes[idx],
+                 interpolation=self.interpolations[idx],
+                 mean=self.means[idx],
+                 std=self.stds[idx],
+                 crop_pct=1.0,        # Set to 1.0 to ignore cropping (initial Resize sets `input_size`)
+                 crop_mode="center",  # Default crop mode -- no-op when `crop_pct == 1.0`
+                 is_training=False,   # No image augmentations when loading the transform!
+             )
+
+             # [Validation] Ensure appropriate transform structure, expected sizes
+             if not (
+                 isinstance(transform, Compose)
+                 and (len(transform.transforms) == 4)
+                 and isinstance(transform.transforms[0], Resize)
+                 and isinstance(transform.transforms[1], CenterCrop)
+                 and isinstance(transform.transforms[2], ToTensor)
+                 and isinstance(transform.transforms[3], Normalize)
+                 and (transform.transforms[0].size == self.input_sizes[idx][-1])
+                 and (transform.transforms[1].size == self.input_sizes[idx][-2:])
+             ):
+                 raise ValueError(f"Unexpected TIMM image transformation structure/sizes: `{transform}`")
+
+             # HF Image Processors *must* be JSON-serializable; as such, cannot have torchvision transforms as attributes.
+             # => Instead, we're going to parse the transform and call "torchvision.transforms.functional" (`tvf`)
+             resize_t, crop_t, norm_t = transform.transforms[0], transform.transforms[1], transform.transforms[3]
+             self.tvf_resize_params.append(
+                 {
+                     "size": resize_t.size,
+                     "interpolation": TVF.pil_modes_mapping[resize_t.interpolation],
+                     "max_size": None,
+                     "antialias": True,
+                 }
+             )
+             self.tvf_crop_params.append({"output_size": crop_t.size})
+             self.tvf_normalize_params.append(
+                 {
+                     "mean": norm_t.mean.float().numpy().tolist(),
+                     "std": norm_t.std.float().numpy().tolist(),
+                     "inplace": False,
+                 }
+             )
+             self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
+
+             # Handle Prismatic `image_resize_strategy`
+             if self.image_resize_strategy == "resize-naive":
+                 self.tvf_resize_params[idx]["size"] = (resize_t.size, resize_t.size)
+             elif self.image_resize_strategy == "letterbox":
+                 self.tvf_do_letterbox, self.tvf_letterbox_fill = True, tuple([int(x * 255) for x in self.means[idx]])
+             elif self.image_resize_strategy == "resize-crop":
+                 pass
+             else:
+                 raise ValueError(f"Image resize strategy `{self.image_resize_strategy}` is not supported!")
+
+         # Dispatch **kwargs to super()
+         super().__init__(**kwargs)
+
+     def apply_transform(self, img: Image.Image) -> torch.Tensor:
+         """Apply `functional` variant of TIMM's Transform = Compose([Resize -> CenterCrop -> ToTensor -> Normalize])"""
+         if self.tvf_do_letterbox:
+             img = letterbox_pad_transform(img, self.tvf_letterbox_fill)
+
+         # [Contract] Fused Backbones expect "channel-stacked" inputs; we'll unpack on the model side!
+         imgs_t = []
+         for idx in range(len(self.input_sizes)):
+             img_idx = TVF.resize(img, **self.tvf_resize_params[idx])
+             img_idx = TVF.center_crop(img_idx, **self.tvf_crop_params[idx])
+             img_idx_t = TVF.to_tensor(img_idx)
+             img_idx_t = TVF.normalize(img_idx_t, **self.tvf_normalize_params[idx])
+             imgs_t.append(img_idx_t)
+
+         # [Contract] `imgs_t` is a list of Tensors of shape [3, input_size, input_size]; stack along dim = 0
+         img_t = torch.vstack(imgs_t)
+
+         return img_t
+
+     def preprocess(
+         self,
+         images: Union[Image.Image, List[Image.Image]],
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         **_: str,
+     ) -> BatchFeature:
+         """
+         Preprocess an image (or batch of images); note that unlike the `transformers :: BaseImageProcessor` we
+         explicitly only handle PIL.Image.Image instances for simplicity.
+         @param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
+         @param return_tensors: BatchFeature default Tensor format (e.g., "pt" for torch); if None, returns np.ndarray
+         @return: Instance of `transformers :: BatchFeature` with a single key "pixel_values"
+         """
+         if not isinstance(images, list):
+             images = [images]
+
+         # Apply `self.img_transform` to each image (will return list of torch.Tensors); stack into "batched" Tensor
+         pixel_values = torch.stack([self.apply_transform(img.convert("RGB")) for img in images])
+
+         # Return BatchFeature =>> note that for compatibility, constructor expects Dict[str, np.ndarray], so we convert
+         return BatchFeature(data={"pixel_values": pixel_values.float().numpy()}, tensor_type=return_tensors)
+
+     def __call__(self, images: Union[Image.Image, List[Image.Image]], **kwargs) -> BatchFeature:
+         return self.preprocess(images, **kwargs)
+
+
+ # === PrismaticProcessor =>> Wraps both ImageProcessor and Tokenizer ===
+ # =>> https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/processing_llava.py
+ class PrismaticProcessor(ProcessorMixin):
+     attributes: ClassVar[List[str]] = ["image_processor", "tokenizer"]
+     image_processor_class: str = "AutoImageProcessor"
+     tokenizer_class: str = "AutoTokenizer"
+
+     def __init__(
+         self,
+         image_processor: Optional[ImageProcessingMixin] = None,
+         tokenizer: Optional[PreTrainedTokenizerBase] = None,
+     ) -> None:
+         super().__init__(image_processor, tokenizer)
+
+     def __call__(
+         self,
+         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+         images: Union[Image.Image, List[Image.Image]],
+         padding: Union[bool, str, PaddingStrategy] = False,
+         truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
+         max_length: Optional[int] = None,
+         return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+     ) -> BatchFeature:
+         """
+         Preprocess a given (batch) of text/images for a Prismatic VLM; forwards text to the underlying LLM's tokenizer,
+         forwards images to PrismaticImageProcessor.
+         @param text: The (batch) of text to encode; must be a string or list of strings.
+         @param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
+         @param padding: Sequence padding strategy (if multiple specified) in < True = "longest" | "max_length" | False >
+         @param truncation: Truncation strategy for the output sequences; requires `max_length` to be specified
+         @param max_length: Maximum length (in tokens) to truncate
+         @param return_tensors: Type of return tensors (usually "pt" or TensorType.PYTORCH)
+         @return: BatchFeature with keys for `input_ids`, `attention_mask` and `pixel_values`.
+         """
+         pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"]
+         text_inputs = self.tokenizer(
+             text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
+         )
+
+         # [Validate] Need same number of images and text inputs!
+         # if pixel_values.shape[0] != text_inputs.input_ids.shape[0]:
+         #     raise ValueError("Batch is malformed; expected same number of images and text inputs!")
+
+         return BatchFeature(data={**text_inputs, "pixel_values": pixel_values})
+
+     # === Tokenizer Dispatch Utilities =>> check `PreTrainedTokenizerBase` for documentation ===
+     def batch_decode(
+         self,
+         sequences: Union[List[int], List[List[int]], torch.Tensor, Any],  # `Any` = np.ndarray | tf.Tensor
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: Optional[bool] = None,
+         **kwargs: str,
+     ) -> List[str]:
+         return self.tokenizer.batch_decode(
+             sequences=sequences,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     def decode(
+         self,
+         token_ids: Union[int, List[int], torch.Tensor, Any],  # `Any` = np.ndarray | tf.Tensor
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: Optional[bool] = None,
+         **kwargs: str,
+     ) -> str:
+         return self.tokenizer.decode(
+             token_ids=token_ids,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     @property
+     def model_input_names(self) -> List[str]:
+         tokenizer_input_names = self.tokenizer.model_input_names
+         image_processor_input_names = self.image_processor.model_input_names
+
+         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
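
Putting the pieces together, a hedged end-to-end sketch of using the processor defined above (repo id, prompt, and image are placeholders):

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("org/prismatic-vlm", trust_remote_code=True)  # hypothetical repo id
image = Image.new("RGB", (640, 480))  # stand-in for a real input image

inputs = processor(text="What is in this image?", images=image)
print(list(inputs.keys()))           # ['input_ids', 'attention_mask', 'pixel_values']
print(inputs["pixel_values"].shape)  # torch.Size([1, 6, 224, 224]) given the fused dual-backbone config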
processor_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "auto_map": {
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "processor_class": "PrismaticProcessor"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<PAD>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<PAD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 2048,
+   "pad_token": "<PAD>",
+   "padding_side": "right",
+   "processor_class": "PrismaticProcessor",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
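
Finally, a small sketch of the tokenizer behaviour these settings imply (token ids follow `added_tokens_decoder` above; the repo id is again a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/prismatic-vlm")  # hypothetical repo id
batch = tok(["short", "a slightly longer prompt"], padding=True, return_tensors="pt")

# add_bos_token=true / add_eos_token=false: each row starts with <s> (id 1) and carries no trailing </s>.
# padding_side="right" with pad_token="<PAD>": the shorter row is padded on the right with id 32000.
print(batch["input_ids"][0])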