MingxuChai committed on
Commit 8387cee · verified · 1 Parent(s): 8af2cf9

Upload 9 files

config.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "_name_or_path": "docfusion",
3
+ "architectures": [
4
+ "DocfusionForConditionalGeneration"
5
+ ],
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_docfusion.DocfusionConfig",
8
+ "AutoModelForCausalLM": "modeling_docfusion.DocfusionForConditionalGeneration"
9
+ },
10
+ "bos_token_id": 0,
11
+ "eos_token_id": 2,
12
+ "ignore_index": -100,
13
+ "model_type": "docfusion",
14
+ "pad_token_id": 1,
15
+ "projection_dim": 768,
16
+ "text_config": {
17
+ "vocab_size": 51289,
18
+ "activation_dropout": 0.1,
19
+ "activation_function": "gelu",
20
+ "add_bias_logits": false,
21
+ "add_final_layer_norm": false,
22
+ "attention_dropout": 0.1,
23
+ "bos_token_id": 0,
24
+ "classif_dropout": 0.1,
25
+ "classifier_dropout": 0.0,
26
+ "d_model": 768,
27
+ "decoder_attention_heads": 12,
28
+ "decoder_ffn_dim": 3072,
29
+ "decoder_layerdrop": 0.0,
30
+ "decoder_layers": 12,
31
+ "decoder_start_token_id": 2,
32
+ "dropout": 0.1,
33
+ "early_stopping": true,
34
+ "encoder_attention_heads": 12,
35
+ "encoder_ffn_dim": 3072,
36
+ "encoder_layerdrop": 0.0,
37
+ "encoder_layers": 0,
38
+ "eos_token_id": 2,
39
+ "forced_eos_token_id": 2,
40
+ "forced_bos_token_id": 0,
41
+ "gradient_checkpointing": false,
42
+ "init_std": 0.02,
43
+ "is_encoder_decoder": true,
44
+ "label2id": {
45
+ "LABEL_0": 0,
46
+ "LABEL_1": 1,
47
+ "LABEL_2": 2
48
+ },
49
+ "max_position_embeddings": 1024,
50
+ "normalize_before": false,
51
+ "pad_token_id": 1,
52
+ "scale_embedding": false,
53
+ "num_beams": 3
54
+ },
55
+ "vision_config": {
56
+ "model_type": "davit",
57
+ "drop_path_rate": 0.1,
58
+ "patch_size": [7, 3, 3, 3],
59
+ "patch_stride": [4, 2, 2, 2],
60
+ "patch_padding": [3, 1, 1, 1],
61
+ "patch_prenorm": [false, true, true, true],
62
+ "enable_checkpoint": false,
63
+ "dim_embed": [128, 256, 512, 1024],
64
+ "num_heads": [4, 8, 16, 32],
65
+ "num_groups": [4, 8, 16, 32],
66
+ "depths": [1, 1, 9, 1],
67
+ "window_size": 12,
68
+ "projection_dim": 768,
69
+ "visual_temporal_embedding": {
70
+ "type": "COSINE",
71
+ "max_temporal_embeddings": 100
72
+ },
73
+ "image_pos_embed": {
74
+ "type": "learned_abs_2d",
75
+ "max_pos_embeddings": 50
76
+ },
77
+ "image_feature_source": ["spatial_avg_pool", "temporal_avg_pool"]
78
+ },
79
+ "vocab_size": 51289,
80
+ "torch_dtype": "float16",
81
+ "transformers_version": "4.41.0.dev0",
82
+ "is_encoder_decoder": true
83
+ }
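Since `auto_map` points at the custom `configuration_docfusion` / `modeling_docfusion` modules shipped in this repo, the checkpoint is intended to be loaded with `trust_remote_code=True`. A minimal loading sketch (the repo id below is a placeholder, not something stated in this commit):

```python
# Hedged loading sketch; "MingxuChai/DocFusion" is a hypothetical repo id, replace it
# with the actual Hub path of this repository.
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "MingxuChai/DocFusion"  # placeholder
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
print(config.model_type)           # "docfusion"
print(config.text_config.d_model)  # 768, as in the text_config above

model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype="auto",  # weights are stored as float16 per "torch_dtype" above
)
```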
configuration_docfusion.py ADDED
@@ -0,0 +1,336 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 Fudan NLP. All rights reserved.
3
+ # This code is licensed under the MIT License; you are free to use, copy, modify, and distribute
4
+ # this code, provided that you retain this copyright notice and the following license statement:
5
+ #
6
+ # This implementation is primarily adapted from the method described in the paper: https://arxiv.org/pdf/2311.06242
7
+ # Additionally, parts of the code are adapted from https://huggingface.co/microsoft/Florence-2-base.
8
+ # For further details and theoretical background, please refer to the paper.
9
+
10
+ """ Docfusion configuration"""
11
+ import warnings
12
+
13
+ from typing import Optional
14
+
15
+ from transformers import AutoConfig
16
+ from transformers.configuration_utils import PretrainedConfig
17
+ from transformers.utils import logging
18
+
19
+ logger = logging.get_logger(__name__)
20
+
21
+ class DocfusionVisionConfig(PretrainedConfig):
22
+ r"""
23
+ This is the configuration class to store the configuration of a [`DocfusionVisionModel`]. It is used to instantiate a DocfusionVisionModel
24
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
25
+ defaults will yield a similar configuration to that of the DocfusionVisionModel architecture.
26
+
27
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
28
+ documentation from [`PretrainedConfig`] for more information.
29
+
30
+ Args:
31
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
32
+ The dropout rate of the drop path layer.
33
+ patch_size (`List[int]`, *optional*, defaults to [7, 3, 3, 3]):
34
+ The patch size of the image.
35
+ patch_stride (`List[int]`, *optional*, defaults to [4, 2, 2, 2]):
36
+ The patch stride of the image.
37
+ patch_padding (`List[int]`, *optional*, defaults to [3, 1, 1, 1]):
38
+ The patch padding of the image.
39
+ patch_prenorm (`List[bool]`, *optional*, defaults to [false, true, true, true]):
40
+ Whether to apply layer normalization before the patch embedding layer.
41
+ enable_checkpoint (`bool`, *optional*, defaults to False):
42
+ Whether to enable checkpointing.
43
+ dim_embed (`List[int]`, *optional*, defaults to [256, 512, 1024, 2048]):
44
+ The dimension of the embedding layer.
45
+ num_heads (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
46
+ The number of attention heads.
47
+ num_groups (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
48
+ The number of groups.
49
+ depths (`List[int]`, *optional*, defaults to [1, 1, 9, 1]):
50
+ The depth of the model.
51
+ window_size (`int`, *optional*, defaults to 12):
52
+ The window size of the model.
53
+ projection_dim (`int`, *optional*, defaults to 1024):
54
+ The dimension of the projection layer.
55
+ visual_temporal_embedding (`dict`, *optional*):
56
+ The configuration of the visual temporal embedding.
57
+ image_pos_embed (`dict`, *optional*):
58
+ The configuration of the image position embedding.
59
+ image_feature_source (`List[str]`, *optional*, defaults to ["spatial_avg_pool", "temporal_avg_pool"]):
60
+ The source of the image feature.
61
+ Example:
62
+
63
+ ```python
64
+ >>> from transformers import DocfusionVisionConfig, DocfusionVisionModel
65
+
66
+ >>> # Initializing a Docfusion Vision style configuration
67
+ >>> configuration = DocfusionVisionConfig()
68
+
69
+ >>> # Initializing a model (with random weights)
70
+ >>> model = DocfusionVisionModel(configuration)
71
+
72
+ >>> # Accessing the model configuration
73
+ >>> configuration = model.config
74
+ ```"""
75
+
76
+ model_type = "docfusion_vision"
77
+ keys_to_ignore_at_inference = ["past_key_values"]
78
+
79
+ def __init__(
80
+ self,
81
+ drop_path_rate=0.1,
82
+ patch_size=[7, 3, 3, 3],
83
+ patch_stride=[4, 2, 2, 2],
84
+ patch_padding=[3, 1, 1, 1],
85
+ patch_prenorm=[False, True, True, True],
86
+ enable_checkpoint=False,
87
+ dim_embed=[256, 512, 1024, 2048],
88
+ num_heads=[8, 16, 32, 64],
89
+ num_groups=[8, 16, 32, 64],
90
+ depths=[1, 1, 9, 1],
91
+ window_size=12,
92
+ projection_dim=1024,
93
+ visual_temporal_embedding=None,
94
+ image_pos_embed=None,
95
+ image_feature_source=["spatial_avg_pool", "temporal_avg_pool"],
96
+ **kwargs,
97
+ ):
98
+ self.drop_path_rate = drop_path_rate
99
+ self.patch_size = patch_size
100
+ self.patch_stride = patch_stride
101
+ self.patch_padding = patch_padding
102
+ self.patch_prenorm = patch_prenorm
103
+ self.enable_checkpoint = enable_checkpoint
104
+ self.dim_embed = dim_embed
105
+ self.num_heads = num_heads
106
+ self.num_groups = num_groups
107
+ self.depths = depths
108
+ self.window_size = window_size
109
+ self.projection_dim = projection_dim
110
+ self.visual_temporal_embedding = visual_temporal_embedding
111
+ self.image_pos_embed = image_pos_embed
112
+ self.image_feature_source = image_feature_source
113
+
114
+ super().__init__(**kwargs)
115
+
116
+
117
+
118
+ class DocfusionLanguageConfig(PretrainedConfig):
119
+ r"""
120
+ This is the configuration class to store the configuration of a [`DocfusionLanguagePreTrainedModel`]. It is used to instantiate a BART
121
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
122
+ defaults will yield a similar configuration to that of the BART
123
+ [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.
124
+
125
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
126
+ documentation from [`PretrainedConfig`] for more information.
127
+
128
+
129
+ Args:
130
+ vocab_size (`int`, *optional*, defaults to 51289):
131
+ Vocabulary size of the DocfusionLanguage model. Defines the number of different tokens that can be represented by the
132
+ `inputs_ids` passed when calling [`DocfusionLanguageModel`].
133
+ d_model (`int`, *optional*, defaults to 1024):
134
+ Dimensionality of the layers and the pooler layer.
135
+ encoder_layers (`int`, *optional*, defaults to 12):
136
+ Number of encoder layers.
137
+ decoder_layers (`int`, *optional*, defaults to 12):
138
+ Number of decoder layers.
139
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
140
+ Number of attention heads for each attention layer in the Transformer encoder.
141
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
142
+ Number of attention heads for each attention layer in the Transformer decoder.
143
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
144
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
145
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
146
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
147
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
148
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
149
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
150
+ dropout (`float`, *optional*, defaults to 0.1):
151
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
152
+ attention_dropout (`float`, *optional*, defaults to 0.0):
153
+ The dropout ratio for the attention probabilities.
154
+ activation_dropout (`float`, *optional*, defaults to 0.0):
155
+ The dropout ratio for activations inside the fully connected layer.
156
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
157
+ The dropout ratio for classifier.
158
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
159
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
160
+ just in case (e.g., 512 or 1024 or 2048).
161
+ init_std (`float`, *optional*, defaults to 0.02):
162
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
163
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
164
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
165
+ for more details.
166
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
167
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
168
+ for more details.
169
+ scale_embedding (`bool`, *optional*, defaults to `False`):
170
+ Scale embeddings by dividing by sqrt(d_model).
171
+ use_cache (`bool`, *optional*, defaults to `True`):
172
+ Whether or not the model should return the last key/values attentions (not used by all models).
173
+ num_labels (`int`, *optional*, defaults to 3):
174
+ The number of labels to use in [`DocfusionLanguageForSequenceClassification`].
175
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
176
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
177
+ `eos_token_id`.
178
+
179
+ Example:
180
+
181
+ ```python
182
+ >>> from transformers import DocfusionLanguageConfig, DocfusionLanguageModel
183
+
184
+ >>> # Initializing a Docfusion Language style configuration
185
+ >>> configuration = DocfusionLanguageConfig()
186
+
187
+ >>> # Initializing a model (with random weights)
188
+ >>> model = DocfusionLanguageModel(configuration)
189
+
190
+ >>> # Accessing the model configuration
191
+ >>> configuration = model.config
192
+ ```"""
193
+
194
+ model_type = "docfusion_language"
195
+ keys_to_ignore_at_inference = ["past_key_values"]
196
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
197
+
198
+ def __init__(
199
+ self,
200
+ vocab_size=51289,
201
+ max_position_embeddings=1024,
202
+ encoder_layers=12,
203
+ encoder_ffn_dim=4096,
204
+ encoder_attention_heads=16,
205
+ decoder_layers=12,
206
+ decoder_ffn_dim=4096,
207
+ decoder_attention_heads=16,
208
+ encoder_layerdrop=0.0,
209
+ decoder_layerdrop=0.0,
210
+ activation_function="gelu",
211
+ d_model=1024,
212
+ dropout=0.1,
213
+ attention_dropout=0.0,
214
+ activation_dropout=0.0,
215
+ init_std=0.02,
216
+ classifier_dropout=0.0,
217
+ scale_embedding=False,
218
+ use_cache=True,
219
+ num_labels=3,
220
+ pad_token_id=1,
221
+ bos_token_id=0,
222
+ eos_token_id=2,
223
+ is_encoder_decoder=True,
224
+ decoder_start_token_id=2,
225
+ forced_eos_token_id=2,
226
+ **kwargs,
227
+ ):
228
+ self.vocab_size = vocab_size
229
+ self.max_position_embeddings = max_position_embeddings
230
+ self.d_model = d_model
231
+ self.encoder_ffn_dim = encoder_ffn_dim
232
+ self.encoder_layers = encoder_layers
233
+ self.encoder_attention_heads = encoder_attention_heads
234
+ self.decoder_ffn_dim = decoder_ffn_dim
235
+ self.decoder_layers = decoder_layers
236
+ self.decoder_attention_heads = decoder_attention_heads
237
+ self.dropout = dropout
238
+ self.attention_dropout = attention_dropout
239
+ self.activation_dropout = activation_dropout
240
+ self.activation_function = activation_function
241
+ self.init_std = init_std
242
+ self.encoder_layerdrop = encoder_layerdrop
243
+ self.decoder_layerdrop = decoder_layerdrop
244
+ self.classifier_dropout = classifier_dropout
245
+ self.use_cache = use_cache
246
+ self.num_hidden_layers = encoder_layers
247
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
248
+
249
+ super().__init__(
250
+ num_labels=num_labels,
251
+ pad_token_id=pad_token_id,
252
+ bos_token_id=bos_token_id,
253
+ eos_token_id=eos_token_id,
254
+ is_encoder_decoder=is_encoder_decoder,
255
+ decoder_start_token_id=decoder_start_token_id,
256
+ forced_eos_token_id=forced_eos_token_id,
257
+ **kwargs,
258
+ )
259
+
260
+ # ensure backward compatibility for BART CNN models
261
+ if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
262
+ self.forced_bos_token_id = self.bos_token_id
263
+ warnings.warn(
264
+ f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
265
+ "The config can simply be saved and uploaded again to be fixed."
266
+ )
267
+
268
+ class DocfusionConfig(PretrainedConfig):
269
+ r"""
270
+ This is the configuration class to store the configuration of a [`DocfusionForConditionalGeneration`]. It is used to instantiate a
271
+ Docfusion model according to the specified arguments, defining the model architecture.
272
+
273
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
274
+ documentation from [`PretrainedConfig`] for more information.
275
+
276
+ Args:
277
+ vision_config (`DocfusionVisionConfig`, *optional*):
278
+ Custom vision config or dict
279
+ text_config (`Union[AutoConfig, dict]`, *optional*):
280
+ The config object of the text backbone.
281
+ ignore_index (`int`, *optional*, defaults to -100):
282
+ The ignore index for the loss function.
283
+ vocab_size (`int`, *optional*, defaults to 51289):
284
+ Vocabulary size of the Docfusion model. Defines the number of different tokens that can be represented by the
285
+ `inputs_ids` passed when calling [`~DocfusionForConditionalGeneration`]
286
+ projection_dim (`int`, *optional*, defaults to 1024):
287
+ Dimension of the multimodal projection space.
288
+
289
+ Example:
290
+
291
+ ```python
292
+ >>> from transformers import DocfusionForConditionalGeneration, DocfusionConfig, CLIPVisionConfig, BartConfig
293
+
294
+ >>> # Initializing a clip-like vision config
295
+ >>> vision_config = CLIPVisionConfig()
296
+
297
+ >>> # Initializing a Bart config
298
+ >>> text_config = BartConfig()
299
+
300
+ >>> # Initializing a Docfusion configuration
301
+ >>> configuration = DocfusionConfig(vision_config, text_config)
302
+
303
+ >>> # Initializing a model from the docfusion configuration
304
+ >>> model = DocfusionForConditionalGeneration(configuration)
305
+
306
+ >>> # Accessing the model configuration
307
+ >>> configuration = model.config
308
+ ```"""
309
+
310
+ model_type = "docfusion"
311
+ is_composition = False
312
+
313
+ def __init__(
314
+ self,
315
+ vision_config=None,
316
+ text_config=None,
317
+ ignore_index=-100,
318
+ vocab_size=51289,
319
+ projection_dim=1024,
320
+ **kwargs,
321
+ ):
322
+ self.ignore_index = ignore_index
323
+ self.vocab_size = vocab_size
324
+ self.projection_dim = projection_dim
325
+ if vision_config is not None:
326
+ vision_config = PretrainedConfig(**vision_config)
327
+ self.vision_config = vision_config
328
+ self.vocab_size = self.vocab_size
329
+
330
+ self.text_config = text_config
331
+ if text_config is not None:
332
+ self.text_config = DocfusionLanguageConfig(**text_config)
333
+
334
+
335
+ super().__init__(**kwargs)
336
+
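As the `__init__` above shows, `DocfusionConfig` expects plain dicts for `vision_config` and `text_config` and wraps them itself (a generic `PretrainedConfig` for the vision side, `DocfusionLanguageConfig` for the text side). A small composition sketch with values taken from the `config.json` above, assuming `configuration_docfusion.py` is importable locally:

```python
# Sketch only: build a DocfusionConfig from nested dicts, mirroring config.json.
from configuration_docfusion import DocfusionConfig

vision_cfg = {
    "model_type": "davit",
    "dim_embed": [128, 256, 512, 1024],
    "num_heads": [4, 8, 16, 32],
    "num_groups": [4, 8, 16, 32],
    "depths": [1, 1, 9, 1],
    "window_size": 12,
    "projection_dim": 768,
}
text_cfg = {
    "vocab_size": 51289,
    "d_model": 768,
    "encoder_layers": 0,
    "decoder_layers": 12,
    "encoder_attention_heads": 12,
    "decoder_attention_heads": 12,
    "encoder_ffn_dim": 3072,
    "decoder_ffn_dim": 3072,
    "max_position_embeddings": 1024,
}

config = DocfusionConfig(vision_config=vision_cfg, text_config=text_cfg,
                         vocab_size=51289, projection_dim=768)
print(type(config.text_config).__name__)  # DocfusionLanguageConfig, rebuilt from the dict
```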
modeling_docfusion.py ADDED
The diff for this file is too large to render. See raw diff
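Although the modeling diff is not rendered here, the pieces in this commit fit together in the usual Florence-2-style loop that the code is adapted from: the processor turns a task token plus an image into `input_ids` and `pixel_values`, `generate` produces location-token text, and `post_process_generation` parses it. A hedged end-to-end sketch (the repo id and generation settings are illustrative and not taken from this commit, except `num_beams=3`, which matches the text config):

```python
# Inference sketch, mirroring the Florence-2 usage pattern this code adapts.
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

repo_id = "MingxuChai/DocFusion"  # placeholder repo id
processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, trust_remote_code=True, torch_dtype=torch.float16
).eval()

image = Image.open("page.png")  # any document image
task = "<DLA>"                  # rewritten to "Locate the objects with category name in the image."
inputs = processor(text=task, images=image, return_tensors="pt").to(model.device, torch.float16)

with torch.no_grad():
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,   # illustrative
        num_beams=3,           # matches "num_beams": 3 in config.json
    )

raw = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
result = processor.post_process_generation(raw, task=task, image_size=image.size)  # (width, height)
print(result[task])  # {'bboxes': [...], 'labels': [...]} for the layout-analysis task
```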
 
preprocessor_config.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_docfusion.DocfusionProcessor"
4
+ },
5
+ "_valid_processor_keys": [
6
+ "images",
7
+ "do_resize",
8
+ "size",
9
+ "resample",
10
+ "do_rescale",
11
+ "rescale_factor",
12
+ "do_normalize",
13
+ "image_mean",
14
+ "image_std",
15
+ "return_tensors",
16
+ "data_format",
17
+ "input_data_format",
18
+ "do_convert_rgb"
19
+ ],
20
+ "do_convert_rgb": null,
21
+ "do_normalize": true,
22
+ "do_rescale": true,
23
+ "do_resize": true,
24
+ "do_center_crop": false,
25
+ "image_processor_type": "CLIPImageProcessor",
26
+ "image_seq_length": 577,
27
+ "image_mean": [0.485, 0.456, 0.406],
28
+ "image_std": [0.229, 0.224, 0.225],
29
+ "processor_class": "DocfusionProcessor",
30
+ "resample": 3,
31
+ "size": {
32
+ "height": 768,
33
+ "width":768
34
+ },
35
+ "crop_size": {
36
+ "height": 768,
37
+ "width": 768
38
+ }
39
+ }
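The settings above are standard `CLIPImageProcessor` arguments (ImageNet mean/std, bicubic resize to 768×768), with `image_seq_length` consumed by `DocfusionProcessor` below. A quick sanity-check sketch that reproduces the same preprocessing directly:

```python
# Sketch: instantiate a CLIPImageProcessor with the values from this file.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor(
    do_resize=True,
    size={"height": 768, "width": 768},
    resample=3,                         # PIL bicubic
    do_rescale=True,
    do_normalize=True,
    image_mean=[0.485, 0.456, 0.406],   # ImageNet statistics
    image_std=[0.229, 0.224, 0.225],
    do_center_crop=False,
)

dummy = Image.fromarray(np.random.randint(0, 255, (600, 400, 3), dtype=np.uint8))
pixel_values = image_processor(dummy, return_tensors="pt")["pixel_values"]
print(pixel_values.shape)  # torch.Size([1, 3, 768, 768])
```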
processing_docfusion.py ADDED
@@ -0,0 +1,1149 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 Fudan NLP. All rights reserved.
3
+ # This code is licensed under the MIT License; you are free to use, copy, modify, and distribute
4
+ # this code, provided that you retain this copyright notice and the following license statement:
5
+ #
6
+ # This implementation is primarily adapted from the method described in the paper: https://arxiv.org/pdf/2311.06242
7
+ # Additionally, parts of the code are adapted from https://huggingface.co/microsoft/Florence-2-base.
8
+ # For further details and theoretical background, please refer to the paper.
9
+
10
+ """
11
+ Processor class for Docfusion.
12
+ """
13
+
14
+ import re
15
+ import logging
16
+ from typing import List, Optional, Union
17
+ import numpy as np
18
+
19
+ import torch
20
+
21
+ from PIL import Image, ImageOps
22
+ from transformers.feature_extraction_utils import BatchFeature
23
+ from transformers.image_utils import ImageInput, is_valid_image
24
+ from transformers import BartTokenizer, BartTokenizerFast, T5Tokenizer, T5TokenizerFast  # used in decode_with_spans
+ from transformers.processing_utils import ProcessorMixin
25
+ from transformers.tokenization_utils_base import (
26
+ PaddingStrategy,
27
+ PreTokenizedInput,
28
+ TextInput,
29
+ TruncationStrategy,
30
+ )
31
+ from transformers.utils import TensorType
32
+
33
+
34
+ logger = logging.getLogger(__name__)
35
+
36
+ # Copied from transformers.models.idefics2.processing_idefics2.is_url
37
+ def is_url(val) -> bool:
38
+ return isinstance(val, str) and val.startswith("http")
39
+
40
+ # Copied from transformers.models.idefics2.processing_idefics2.is_image_or_image_url
41
+ def is_image_or_image_url(elem):
42
+ return is_url(elem) or is_valid_image(elem)
43
+
44
+
45
+ def _is_str_or_image(elem):
46
+ return isinstance(elem, (str)) or is_image_or_image_url(elem)
47
+
48
+
49
+ class DocfusionProcessor(ProcessorMixin):
50
+ r"""
51
+ Constructs a Docfusion processor which wraps a Docfusion image processor and a Docfusion tokenizer into a single processor.
52
+
53
+ [`DocfusionProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BartTokenizerFast`]. See the
54
+ [`~DocfusionProcessor.__call__`] and [`~DocfusionProcessor.decode`] for more information.
55
+
56
+ Args:
57
+ image_processor ([`CLIPImageProcessor`], *optional*):
58
+ The image processor is a required input.
59
+ tokenizer ([`BartTokenizerFast`], *optional*):
60
+ The tokenizer is a required input.
61
+ """
62
+
63
+ attributes = ["image_processor", "tokenizer"]
64
+ image_processor_class = "CLIPImageProcessor"
65
+ tokenizer_class = ("BartTokenizer", "BartTokenizerFast")
66
+
67
+ def __init__(
68
+ self,
69
+ image_processor=None,
70
+ tokenizer=None,
71
+ ):
72
+ if image_processor is None:
73
+ raise ValueError("You need to specify an `image_processor`.")
74
+ if tokenizer is None:
75
+ raise ValueError("You need to specify a `tokenizer`.")
76
+ if not hasattr(image_processor, "image_seq_length"):
77
+ raise ValueError("Image processor is missing an `image_seq_length` attribute.")
78
+
79
+ self.image_seq_length = image_processor.image_seq_length
80
+
81
+ tokens_to_add = {
82
+ 'additional_special_tokens': \
83
+ tokenizer.additional_special_tokens + \
84
+ ['<od>', '</od>', '<ocr>', '</ocr>'] + \
85
+ [f'<loc_{x}>' for x in range(1000)] + \
86
+ ['<cap>', '</cap>', '<ncap>', '</ncap>','<dcap>', '</dcap>', '<grounding>', '</grounding>', '<seg>', '</seg>', '<sep>', '<region_cap>', '</region_cap>', '<region_to_desciption>', '</region_to_desciption>', '<proposal>', '</proposal>', '<poly>', '</poly>', '<and>']
87
+ }
88
+ tokenizer.add_special_tokens(tokens_to_add)
89
+
90
+ self.tasks_answer_post_processing_type = {
91
+ # '<OCR>': 'pure_text',
92
+ '<OCR_WITH_REGION>': 'ocr',
93
+ '<CAPTION>': 'pure_text',
94
+ '<DETAILED_CAPTION>': 'pure_text',
95
+ '<MORE_DETAILED_CAPTION>': 'pure_text',
96
+ '<OD>': 'description_with_bboxes',
97
+ '<DENSE_REGION_CAPTION>': 'description_with_bboxes',
98
+ '<CAPTION_TO_PHRASE_GROUNDING>': "phrase_grounding",
99
+ '<REFERRING_EXPRESSION_SEGMENTATION>': 'polygons',
100
+ '<REGION_TO_SEGMENTATION>': 'polygons',
101
+ '<OPEN_VOCABULARY_DETECTION>': 'description_with_bboxes_or_polygons',
102
+ '<REGION_TO_CATEGORY>': 'pure_text',
103
+ '<REGION_TO_DESCRIPTION>': 'pure_text',
104
+ '<REGION_TO_OCR>': 'pure_text',
105
+ '<REGION_PROPOSAL>': 'bboxes',
106
+
107
+ # '<Decompile latex code>':'pure_text',
108
+ '<DLA>': 'description_with_bboxes',
109
+ '<OCR>': 'pure_text',
110
+ # '<Detect all the chars in this equtation>': 'description_with_bboxes',
111
+ '<MER>': 'pure_text',
112
+ '<TR>': 'pure_text',
113
+ }
114
+
115
+ self.task_prompts_without_inputs = {
116
+ # '<OCR>': 'What is the text in the image?',
117
+ '<OCR_WITH_REGION>': 'What is the text in the image, with regions?',
118
+ '<CAPTION>': 'What does the image describe?',
119
+ '<DETAILED_CAPTION>': 'Describe in detail what is shown in the image.',
120
+ '<MORE_DETAILED_CAPTION>': 'Describe with a paragraph what is shown in the image.',
121
+ # '<OD>': 'Locate the objects with category name in the image.',
122
+ '<DENSE_REGION_CAPTION>': 'Locate the objects in the image, with their descriptions.',
123
+ '<REGION_PROPOSAL>': 'Locate the region proposals in the image.',
124
+
125
+ # '<Detect all the chars in this equtation>': 'Identify and locate all characters in this equation.',
126
+ # '<Detect all the equations on the page>': 'Locate the region proposals in the image.',
127
+ '<DLA>': 'Locate the objects with category name in the image.',
128
+ '<OCR>': 'What is the text in the image?',
129
+ # '<Detect all the chars in this equtation>': 'Locate the segenmts in the equation, with their descriptions.',
130
+ # '<Detect all the chars in this equtation>': 'Locate the region proposals in the image.',
131
+ '<MER>': 'Decompile LaTeX code from the equation image.',
132
+ '<TR>': 'Decompile LaTeX code from the table image.',
133
+ }
134
+
135
+ self.task_prompts_with_input = {
136
+ '<CAPTION_TO_PHRASE_GROUNDING>': "Locate the phrases in the caption: {input}",
137
+ '<REFERRING_EXPRESSION_SEGMENTATION>': 'Locate {input} in the image with mask',
138
+ '<REGION_TO_SEGMENTATION>': 'What is the polygon mask of region {input}',
139
+ '<OPEN_VOCABULARY_DETECTION>': 'Locate {input} in the image.',
140
+ '<REGION_TO_CATEGORY>': 'What is the region {input}?',
141
+ '<REGION_TO_DESCRIPTION>': 'What does the region {input} describe?',
142
+ '<REGION_TO_OCR>': 'What text is in the region {input}?',
143
+ }
144
+
145
+ self.post_processor = DocfusionPostProcesser(tokenizer=tokenizer)
146
+
147
+
148
+ super().__init__(image_processor, tokenizer)
149
+
150
+ # 处理单个
151
+ def parse_location_string(self,location_string):
152
+ elements = location_string.replace('<loc_', '').replace('>', ' ').split()
153
+ if len(elements) % 4 != 0:
154
+ return None
155
+ elements = list(map(int, elements))
156
+ grouped_elements = [elements[i:i+4] for i in range(0, len(elements), 4)]
157
+
158
+ return grouped_elements
159
+
160
+ def process_bboxs_inputs(self,bboxes):
161
+ if isinstance(bboxes,str):
162
+ bboxes = [bboxes]
163
+ result = []
164
+ for bbox in bboxes:
165
+ result.append(self.parse_location_string(bbox))
166
+ return result
167
+
168
+ def resize_and_pad_image(self,image):
169
+ image = image.convert('RGB')
170
+ original_width, original_height = image.size
171
+
172
+ # Set the target size
173
+ target_size = 768
174
+
175
+ # Check whether the image needs to be scaled down
176
+ if original_width > target_size or original_height > target_size:
177
+ # Compute the scaling factor
178
+ scale = min(target_size / original_width, target_size / original_height)
179
+ # Resize the image by that factor
180
+ new_width = int(original_width * scale)
181
+ new_height = int(original_height * scale)
182
+ image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
183
+ else:
184
+ # No scaling needed; keep the current size
185
+ new_width, new_height = original_width, original_height
186
+
187
+ # Compute the horizontal and vertical padding
188
+ padding_left = (target_size - new_width) // 2
189
+ padding_top = (target_size - new_height) // 2
190
+ padding_right = target_size - new_width - padding_left
191
+ padding_bottom = target_size - new_height - padding_top
192
+
193
+ # Pad with white (255, 255, 255)
194
+ padded_image = ImageOps.expand(image, (padding_left, padding_top, padding_right, padding_bottom), fill=(255, 255, 255))
195
+
196
+ return padded_image
197
+
198
+
199
+ def _construct_prompts(self, text):
200
+ # replace the task tokens with the task prompts if task token is in the text
201
+ prompts = []
202
+ for _text in text:
203
+ # 1. fixed task prompts without additional inputs
204
+ for task_token, task_prompt in self.task_prompts_without_inputs.items():
205
+ if task_token in _text:
206
+ assert _text == task_token, f"Task token {task_token} should be the only token in the text."
207
+ _text = task_prompt
208
+ break
209
+ # 2. task prompts with additional inputs
210
+ for task_token, task_prompt in self.task_prompts_with_input.items():
211
+ if task_token in _text:
212
+ _text = task_prompt.format(input=_text.replace(task_token, ''))
213
+ break
214
+ prompts.append(_text)
215
+ return prompts
216
+
217
+ def __call__(
218
+ self,
219
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
220
+ images: ImageInput = None,
221
+ tokenize_newline_separately: bool = True,
222
+ padding: Union[bool, str, PaddingStrategy] = False,
223
+ truncation: Union[bool, str, TruncationStrategy] = None,
224
+ max_length=None,
225
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
226
+ do_resize: bool = None,
227
+ do_normalize: bool = None,
228
+ image_mean: Optional[Union[float, List[float]]] = None,
229
+ image_std: Optional[Union[float, List[float]]] = None,
230
+ data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821
231
+ input_data_format: Optional[
232
+ Union[str, "ChannelDimension"] # noqa: F821
233
+ ] = None,
234
+ resample: "PILImageResampling" = None, # noqa: F821
235
+ do_convert_rgb: bool = None,
236
+ do_thumbnail: bool = None,
237
+ do_align_long_axis: bool = None,
238
+ do_rescale: bool = None,
239
+ ) -> BatchFeature:
240
+ """
241
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
242
+ and `kwargs` arguments to BartTokenizerFast's [`~BartTokenizerFast.__call__`] if `text` is not `None` to encode
243
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
244
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
245
+ of the above two methods for more information.
246
+
247
+ Args:
248
+ text (`str`, `List[str]`, `List[List[str]]`):
249
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
250
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
251
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
252
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
253
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
254
+ tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
255
+ number of channels, H and W are image height and width.
256
+ tokenize_newline_separately (`bool`, defaults to `True`):
257
+ Adds a separately tokenized '\n' at the end of the prompt.
258
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
259
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
260
+ index) among:
261
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
262
+ sequence is provided).
263
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
264
+ acceptable input length for the model if that argument is not provided.
265
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
266
+ lengths).
267
+ max_length (`int`, *optional*):
268
+ Maximum length of the returned list and optionally padding length (see above).
269
+ truncation (`bool`, *optional*):
270
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
271
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
272
+ If set, will return tensors of a particular framework. Acceptable values are:
273
+
274
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
275
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
276
+ - `'np'`: Return NumPy `np.ndarray` objects.
277
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
278
+
279
+ Returns:
280
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
281
+
282
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix`
283
+ is provided, the `input_ids` will also contain the suffix input ids.
284
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
285
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
286
+ `None`).
287
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
288
+ - **labels** -- Labels compatible with training if `suffix` is not None
289
+ """
290
+
291
+ return_token_type_ids = False
292
+
293
+ if images is None:
294
+ raise ValueError("`images` are expected as arguments to a `DocfusionProcessor` instance.")
295
+ if text is None:
296
+ logger.warning_once(
297
+ "You are using Docfusion without a text prompt."
298
+ )
299
+ text = ""
300
+
301
+ if isinstance(text, List) and isinstance(images, List):
302
+ if len(images) < len(text):
303
+ raise ValueError(
304
+ f"Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image."
305
+ )
306
+ if _is_str_or_image(text):
307
+ text = [text]
308
+ elif isinstance(text, list) and _is_str_or_image(text[0]):
309
+ pass
310
+
311
+ pixel_values = self.image_processor(
312
+ images,
313
+ do_resize=do_resize,
314
+ do_normalize=do_normalize,
315
+ return_tensors=return_tensors,
316
+ image_mean=image_mean,
317
+ image_std=image_std,
318
+ input_data_format=input_data_format,
319
+ data_format=data_format,
320
+ resample=resample,
321
+ do_convert_rgb=do_convert_rgb,
322
+ )["pixel_values"]
323
+
324
+ if max_length is not None:
325
+ max_length -= self.image_seq_length # max_length has to account for the image tokens
326
+
327
+ text = self._construct_prompts(text)
328
+
329
+ inputs = self.tokenizer(
330
+ text,
331
+ return_tensors=return_tensors,
332
+ padding=padding,
333
+ max_length=max_length,
334
+ truncation=truncation,
335
+ return_token_type_ids=return_token_type_ids,
336
+ )
337
+
338
+ return_data = {**inputs, "pixel_values": pixel_values}
339
+
340
+ if return_token_type_ids:
341
+ labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100)
342
+ return_data.update({"labels": labels})
343
+ return BatchFeature(data=return_data)
344
+
345
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Docfusion
346
+ def batch_decode(self, *args, **kwargs):
347
+ """
348
+ This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
349
+ refer to the docstring of this method for more information.
350
+ """
351
+ return self.tokenizer.batch_decode(*args, **kwargs)
352
+
353
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Docfusion
354
+ def decode(self, *args, **kwargs):
355
+ """
356
+ This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
357
+ the docstring of this method for more information.
358
+ """
359
+ return self.tokenizer.decode(*args, **kwargs)
360
+
361
+ @property
362
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->Docfusion
363
+ def model_input_names(self):
364
+ tokenizer_input_names = self.tokenizer.model_input_names
365
+ image_processor_input_names = self.image_processor.model_input_names
366
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
367
+
368
+ def post_process_generation(self, text, task, image_size):
369
+ """
370
+ Post-process the output of the model to each of the task outputs.
371
+
372
+ Args:
373
+ text (`str`): The text to post-process.
374
+ task (`str`): The task to post-process the text for.
375
+ image_size (`Tuple[int, int]`): The size of the image as (width, height).
376
+ """
377
+
378
+ task_answer_post_processing_type = self.tasks_answer_post_processing_type.get(task, 'pure_text')
379
+ task_answer = self.post_processor(
380
+ text=text,
381
+ image_size=image_size,
382
+ parse_tasks=task_answer_post_processing_type,
383
+ )[task_answer_post_processing_type]
384
+
385
+ if task_answer_post_processing_type == 'pure_text':
386
+ final_answer = task_answer
387
+ # remove the special tokens
388
+ final_answer = final_answer.replace('<s>', '').replace('</s>', '')
389
+ elif task_answer_post_processing_type in ['od', 'description_with_bboxes', 'bboxes']:
390
+ od_instances = task_answer
391
+ bboxes_od = [_od_instance['bbox'] for _od_instance in od_instances]
392
+ labels_od = [str(_od_instance['cat_name']) for _od_instance in od_instances]
393
+ final_answer = {'bboxes': bboxes_od, 'labels': labels_od}
394
+ elif task_answer_post_processing_type in ['ocr']:
395
+ bboxes = [_od_instance['quad_box'] for _od_instance in task_answer]
396
+ labels = [str(_od_instance['text']) for _od_instance in task_answer]
397
+ final_answer = {'quad_boxes': bboxes, 'labels': labels}
398
+ elif task_answer_post_processing_type in ['phrase_grounding']:
399
+ bboxes = []
400
+ labels = []
401
+ for _grounded_phrase in task_answer:
402
+ for _bbox in _grounded_phrase['bbox']:
403
+ bboxes.append(_bbox)
404
+ labels.append(_grounded_phrase['cat_name'])
405
+ final_answer = {'bboxes': bboxes, 'labels': labels}
406
+ elif task_answer_post_processing_type in ['description_with_polygons', 'polygons']:
407
+ labels = []
408
+ polygons = []
409
+ for result in task_answer:
410
+ label = result['cat_name']
411
+ _polygons = result['polygons']
412
+ labels.append(label)
413
+ polygons.append(_polygons)
414
+ final_answer = {'polygons': polygons, 'labels': labels}
415
+ elif task_answer_post_processing_type in ['description_with_bboxes_or_polygons']:
416
+ bboxes = []
417
+ bboxes_labels = []
418
+ polygons = []
419
+ polygons_labels = []
420
+ for result in task_answer:
421
+ label = result['cat_name']
422
+ if 'polygons' in result:
423
+ _polygons = result['polygons']
424
+ polygons.append(_polygons)
425
+ polygons_labels.append(label)
426
+ else:
427
+ _bbox = result['bbox']
428
+ bboxes.append(_bbox)
429
+ bboxes_labels.append(label)
430
+ final_answer = {'bboxes': bboxes, 'bboxes_labels': bboxes_labels, 'polygons': polygons, 'polygons_labels': polygons_labels}
431
+ else:
432
+ raise ValueError('Unknown task answer post processing type: {}'.format(task_answer_post_processing_type))
433
+
434
+ final_answer = {
435
+ task: final_answer}
436
+ return final_answer
437
+
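The prompt templates and post-processing map above define the whole task interface: a task token is rewritten into a natural-language prompt before tokenization, and the same token selects how the generated text is parsed. A tiny illustration (the repo id is a placeholder; `_construct_prompts` is an internal helper, called here only to show the mapping):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("MingxuChai/DocFusion", trust_remote_code=True)  # placeholder

print(processor._construct_prompts(["<OCR>"]))
# ['What is the text in the image?']
print(processor._construct_prompts(["<REGION_TO_OCR><loc_100><loc_200><loc_300><loc_400>"]))
# ['What text is in the region <loc_100><loc_200><loc_300><loc_400>?']
print(processor.tasks_answer_post_processing_type["<DLA>"])
# 'description_with_bboxes'
```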
438
+ class BoxQuantizer(object):
439
+ def __init__(self, mode, bins):
440
+ self.mode = mode
441
+ self.bins = bins
442
+
443
+ def quantize(self, boxes: torch.Tensor, size):
444
+ bins_w, bins_h = self.bins # Quantization bins.
445
+ size_w, size_h = size # Original image size.
446
+ size_per_bin_w = size_w / bins_w
447
+ size_per_bin_h = size_h / bins_h
448
+ xmin, ymin, xmax, ymax = boxes.split(1, dim=-1) # Shape: 4 * [N, 1].
449
+
450
+ if self.mode == 'floor':
451
+ quantized_xmin = (
452
+ xmin / size_per_bin_w).floor().clamp(0, bins_w - 1)
453
+ quantized_ymin = (
454
+ ymin / size_per_bin_h).floor().clamp(0, bins_h - 1)
455
+ quantized_xmax = (
456
+ xmax / size_per_bin_w).floor().clamp(0, bins_w - 1)
457
+ quantized_ymax = (
458
+ ymax / size_per_bin_h).floor().clamp(0, bins_h - 1)
459
+
460
+ elif self.mode == 'round':
461
+ raise NotImplementedError()
462
+
463
+ else:
464
+ raise ValueError('Incorrect quantization type.')
465
+
466
+ quantized_boxes = torch.cat(
467
+ (quantized_xmin, quantized_ymin, quantized_xmax, quantized_ymax), dim=-1
468
+ ).int()
469
+
470
+ return quantized_boxes
471
+
472
+ def dequantize(self, boxes: torch.Tensor, size):
473
+ bins_w, bins_h = self.bins # Quantization bins.
474
+ size_w, size_h = size # Original image size.
475
+ size_per_bin_w = size_w / bins_w
476
+ size_per_bin_h = size_h / bins_h
477
+ xmin, ymin, xmax, ymax = boxes.split(1, dim=-1) # Shape: 4 * [N, 1].
478
+
479
+ if self.mode == 'floor':
480
+ # Add 0.5 to use the center position of the bin as the coordinate.
481
+ dequantized_xmin = (xmin + 0.5) * size_per_bin_w
482
+ dequantized_ymin = (ymin + 0.5) * size_per_bin_h
483
+ dequantized_xmax = (xmax + 0.5) * size_per_bin_w
484
+ dequantized_ymax = (ymax + 0.5) * size_per_bin_h
485
+
486
+ elif self.mode == 'round':
487
+ raise NotImplementedError()
488
+
489
+ else:
490
+ raise ValueError('Incorrect quantization type.')
491
+
492
+ dequantized_boxes = torch.cat(
493
+ (dequantized_xmin, dequantized_ymin,
494
+ dequantized_xmax, dequantized_ymax), dim=-1
495
+ )
496
+
497
+ return dequantized_boxes
498
+
499
+
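`BoxQuantizer` maps pixel boxes onto the 1000 `<loc_*>` bins and back. A small worked example, assuming `processing_docfusion.py` is importable locally: on a 768×768 image each bin spans 0.768 px, so x = 100 falls into bin floor(100 / 0.768) = 130, and dequantization returns the bin centre (130 + 0.5) × 0.768 ≈ 100.2.

```python
# Worked example of the floor-mode box quantization used for <loc_*> tokens.
import torch
from processing_docfusion import BoxQuantizer  # assumes this file is on the Python path

quantizer = BoxQuantizer(mode="floor", bins=(1000, 1000))
boxes = torch.tensor([[100.0, 200.0, 300.0, 400.0]])         # xmin, ymin, xmax, ymax in pixels
bins = quantizer.quantize(boxes, size=(768, 768))
print(bins.tolist())                                         # [[130, 260, 390, 520]]
print(quantizer.dequantize(bins, size=(768, 768)).tolist())  # ≈ [[100.2, 200.1, 299.9, 399.7]]
```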
500
+ class CoordinatesQuantizer(object):
501
+ """
502
+ Quantize coordinates (Nx2)
503
+ """
504
+
505
+ def __init__(self, mode, bins):
506
+ self.mode = mode
507
+ self.bins = bins
508
+
509
+ def quantize(self, coordinates: torch.Tensor, size):
510
+ bins_w, bins_h = self.bins # Quantization bins.
511
+ size_w, size_h = size # Original image size.
512
+ size_per_bin_w = size_w / bins_w
513
+ size_per_bin_h = size_h / bins_h
514
+ assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
515
+ x, y = coordinates.split(1, dim=-1) # Shape: 2 * [N, 1].
516
+
517
+ if self.mode == 'floor':
518
+ quantized_x = (x / size_per_bin_w).floor().clamp(0, bins_w - 1)
519
+ quantized_y = (y / size_per_bin_h).floor().clamp(0, bins_h - 1)
520
+
521
+ elif self.mode == 'round':
522
+ raise NotImplementedError()
523
+
524
+ else:
525
+ raise ValueError('Incorrect quantization type.')
526
+
527
+ quantized_coordinates = torch.cat(
528
+ (quantized_x, quantized_y), dim=-1
529
+ ).int()
530
+
531
+ return quantized_coordinates
532
+
533
+ def dequantize(self, coordinates: torch.Tensor, size):
534
+ bins_w, bins_h = self.bins # Quantization bins.
535
+ size_w, size_h = size # Original image size.
536
+ size_per_bin_w = size_w / bins_w
537
+ size_per_bin_h = size_h / bins_h
538
+ assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
539
+ x, y = coordinates.split(1, dim=-1) # Shape: 2 * [N, 1].
540
+
541
+ if self.mode == 'floor':
542
+ # Add 0.5 to use the center position of the bin as the coordinate.
543
+ dequantized_x = (x + 0.5) * size_per_bin_w
544
+ dequantized_y = (y + 0.5) * size_per_bin_h
545
+
546
+ elif self.mode == 'round':
547
+ raise NotImplementedError()
548
+
549
+ else:
550
+ raise ValueError('Incorrect quantization type.')
551
+
552
+ dequantized_coordinates = torch.cat(
553
+ (dequantized_x, dequantized_y), dim=-1
554
+ )
555
+
556
+ return dequantized_coordinates
557
+
558
+
559
+ class DocfusionPostProcesser(object):
560
+ """
561
+ Docfusion post-processor for converting the text prediction into the various task results.
562
+
563
+ Args:
564
+ config: A dict of configs.
565
+ tokenizer: A tokenizer for decoding text to spans.
566
+ sample config:
567
+ UNIFIED_POST_PROCESS:
568
+ # common configs
569
+ NUM_BBOX_HEIGHT_BINS: 1000
570
+ NUM_BBOX_WIDTH_BINS: 1000
571
+ COORDINATES_HEIGHT_BINS: 1000
572
+ COORDINATES_WIDTH_BINS: 1000
573
+ # task specific configs, override the common configs
574
+ PARSE_TASKS:
575
+ - TASK_NAME: 'video_dense_caption'
576
+ PATTERN: 'r<time_(\d+)><time_(\d+)>([a-zA-Z0-9 ]+)'
577
+ SCORE_MODE: 'avg_cat_name_scores'
578
+ NUM_BINS: 100
579
+ - TASK_NAME: 'od'
580
+ PATTERN: 'r<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>([a-zA-Z0-9 ]+)'
581
+ SCORE_MODE: 'avg_cat_name_scores'
582
+
583
+ Returns:
584
+ parsed_dict (dict): A dict of parsed results.
585
+ """
586
+ def __init__(
587
+ self,
588
+ tokenizer=None
589
+ ):
590
+ parse_tasks = []
591
+ parse_task_configs = {}
592
+ config = self._create_default_config()
593
+ for task in config['PARSE_TASKS']:
594
+ parse_tasks.append(task['TASK_NAME'])
595
+ parse_task_configs[task['TASK_NAME']] = task
596
+
597
+ self.config = config
598
+ self.parse_tasks = parse_tasks
599
+ self.parse_tasks_configs = parse_task_configs
600
+
601
+ self.tokenizer = tokenizer
602
+ if self.tokenizer is not None:
603
+ self.all_special_tokens = set(self.tokenizer.all_special_tokens)
604
+
605
+ self.init_quantizers()
606
+ self.black_list_of_phrase_grounding = self._create_black_list_of_phrase_grounding()
607
+
608
+ def _create_black_list_of_phrase_grounding(self):
609
+ black_list = {}
610
+
611
+ if 'phrase_grounding' in self.parse_tasks and self.parse_tasks_configs['phrase_grounding']['FILTER_BY_BLACK_LIST']:
612
+ black_list = set(
613
+ ['it', 'I', 'me', 'mine',
614
+ 'you', 'your', 'yours',
615
+ 'he', 'him', 'his',
616
+ 'she', 'her', 'hers',
617
+ 'they', 'them', 'their', 'theirs',
618
+ 'one', 'oneself',
619
+ 'we', 'us', 'our', 'ours',
620
+ 'you', 'your', 'yours',
621
+ 'they', 'them', 'their', 'theirs',
622
+ 'mine', 'yours', 'his', 'hers', 'its',
623
+ 'ours', 'yours', 'theirs',
624
+ 'myself', 'yourself', 'himself', 'herself', 'itself',
625
+ 'ourselves', 'yourselves', 'themselves',
626
+ 'this', 'that',
627
+ 'these', 'those',
628
+ 'who', 'whom', 'whose', 'which', 'what',
629
+ 'who', 'whom', 'whose', 'which', 'that',
630
+ 'all', 'another', 'any', 'anybody', 'anyone', 'anything',
631
+ 'each', 'everybody', 'everyone', 'everything',
632
+ 'few', 'many', 'nobody', 'none', 'one', 'several',
633
+ 'some', 'somebody', 'someone', 'something',
634
+ 'each other', 'one another',
635
+ 'myself', 'yourself', 'himself', 'herself', 'itself',
636
+ 'ourselves', 'yourselves', 'themselves',
637
+ 'the image', 'image', 'images', 'the', 'a', 'an', 'a group',
638
+ 'other objects', 'lots', 'a set',
639
+ ]
640
+ )
641
+
642
+ return black_list
643
+
644
+ def _create_default_config(self):
645
+ config = {
646
+ 'NUM_BBOX_HEIGHT_BINS': 1000,
647
+ 'NUM_BBOX_WIDTH_BINS': 1000,
648
+ 'BOX_QUANTIZATION_MODE': 'floor',
649
+ 'COORDINATES_HEIGHT_BINS': 1000,
650
+ 'COORDINATES_WIDTH_BINS': 1000,
651
+ 'COORDINATES_QUANTIZATION_MODE': 'floor',
652
+ 'PARSE_TASKS': [
653
+ {
654
+ 'TASK_NAME': 'od',
655
+ 'PATTERN': r'([a-zA-Z0-9 ]+)<loc_(\\d+)><loc_(\\d+)><loc_(\\d+)><loc_(\\d+)>'
656
+ },
657
+ {
658
+ 'TASK_NAME': 'ocr',
659
+ 'PATTERN': r'(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>',
660
+ 'AREA_THRESHOLD': 0.00
661
+ },
662
+ {
663
+ 'TASK_NAME': 'phrase_grounding',
664
+ 'FILTER_BY_BLACK_LIST': True
665
+ },
666
+ {
667
+ 'TASK_NAME': 'pure_text',
668
+ },
669
+ {
670
+ 'TASK_NAME': 'description_with_bboxes',
671
+ },
672
+ {
673
+ 'TASK_NAME': 'description_with_polygons',
674
+ },
675
+ {
676
+ 'TASK_NAME': 'polygons',
677
+ },
678
+ {
679
+ 'TASK_NAME': 'bboxes',
680
+ },
681
+ {
682
+ 'TASK_NAME': 'description_with_bboxes_or_polygons',
683
+ }
684
+ ]
685
+ }
686
+
687
+ return config
688
+
689
+ def init_quantizers(self):
690
+ # we have box_quantizer (od, grounding) and coordinates_quantizer (ocr, referring_segmentation)
691
+ num_bbox_height_bins = self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
692
+ num_bbox_width_bins = self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
693
+ box_quantization_mode = self.config.get('BOX_QUANTIZATION_MODE', 'floor')
694
+ self.box_quantizer = BoxQuantizer(
695
+ box_quantization_mode,
696
+ (num_bbox_width_bins, num_bbox_height_bins),
697
+ )
698
+
699
+ num_bbox_height_bins = self.config['COORDINATES_HEIGHT_BINS'] if 'COORDINATES_HEIGHT_BINS' in self.config else self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
700
+ num_bbox_width_bins = self.config['COORDINATES_WIDTH_BINS'] if 'COORDINATES_WIDTH_BINS' in self.config else self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
701
+ box_quantization_mode = self.config.get('COORDINATES_QUANTIZATION_MODE') if 'COORDINATES_QUANTIZATION_MODE' in self.config else self.config.get('BOX_QUANTIZATION_MODE', 'floor')
702
+ self.coordinates_quantizer = CoordinatesQuantizer(
703
+ box_quantization_mode,
704
+ (num_bbox_width_bins, num_bbox_height_bins),
705
+ )
706
+
707
+ def decode_with_spans(self, tokenizer, token_ids):
708
+ filtered_tokens = tokenizer.convert_ids_to_tokens(
709
+ token_ids, skip_special_tokens=False)
710
+ assert len(filtered_tokens) == len(token_ids)
711
+
712
+ # To avoid mixing byte-level and unicode for byte-level BPE
713
+ # we need to build string separately for added tokens and byte-level tokens
714
+ # cf. https://github.com/huggingface/transformers/issues/1133
715
+ sub_texts = []
716
+ for token in filtered_tokens:
717
+ if token in self.all_special_tokens:
718
+ sub_texts.append(token)
719
+ else:
720
+ if isinstance(tokenizer, (BartTokenizer, BartTokenizerFast)):
721
+ sub_text = tokenizer.convert_tokens_to_string([token])
722
+ elif isinstance(tokenizer, (T5Tokenizer, T5TokenizerFast)):
723
+ # Ref: https://github.com/google/sentencepiece#whitespace-is-treated-as-a-basic-symbol
724
+ # Note: Do not strip sub_text as it may have functional whitespace
725
+ sub_text = token.replace('▁', ' ')
726
+ else:
727
+ raise ValueError(f'type {type(tokenizer)} not supported')
728
+ sub_texts.append(sub_text)
729
+
730
+ text = ''
731
+ spans = []
732
+ for sub_text in sub_texts:
733
+ span = (len(text), len(text) + len(sub_text)) # [start index, end index).
734
+ text += sub_text
735
+ spans.append(span)
736
+
737
+ # Text format:
738
+ # 1. T5Tokenizer/T5TokenizerFast:
739
+ # "<loc_1><loc_2><loc_3><loc_4> transplanting dog<loc_1><loc_2><loc_3><loc_4> cat</s>"
740
+ # Equivalent to t5_tokenizer.decode(input_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False, spaces_between_special_tokens=False)
741
+ # 2. BartTokenizer (need to double check):
742
+ # "<s><loc_1><loc_2><loc_3><loc_4>transplanting dog<loc_1><loc_2><loc_3><loc_4>cat</s>"
743
+ # Equivalent to bart_tokenizer.decode(input_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False, spaces_between_special_tokens=False)
744
+ return text, spans
745
+
746
+ def parse_od_from_text_and_spans(
747
+ self,
748
+ text,
749
+ pattern,
750
+ image_size,
751
+ phrase_centric=False
752
+ ):
753
+ parsed = list(re.finditer(pattern, text))
754
+
755
+ instances = []
756
+ for i in range(len(parsed)):
757
+ # Prepare instance.
758
+ instance = {}
759
+
760
+ if phrase_centric:
761
+ bbox_bins = [int(parsed[i].group(j)) for j in range(2, 6)]
762
+ else:
763
+ bbox_bins = [int(parsed[i].group(j)) for j in range(1, 5)]
764
+ instance['bbox'] = self.box_quantizer.dequantize(
765
+ boxes=torch.tensor(bbox_bins),
766
+ size=image_size
767
+ ).tolist()
768
+
769
+ if phrase_centric:
770
+ instance['cat_name'] = parsed[i].group(1).lower().strip()
771
+ else:
772
+ instance['cat_name'] = parsed[i].group(5).lower().strip()
773
+ instances.append(instance)
774
+
775
+ return instances
776
+
777
+ def parse_ocr_from_text_and_spans(self,
778
+ text,
779
+ pattern,
780
+ image_size,
781
+ area_threshold=-1.0,
782
+ ):
783
+ bboxes = []
784
+ labels = []
785
+ text = text.replace('<s>', '')
786
+ # ocr with regions
787
+ parsed = re.findall(pattern, text)
788
+ instances = []
789
+ image_width, image_height = image_size
790
+
791
+ for ocr_line in parsed:
792
+ ocr_content = ocr_line[0]
793
+ quad_box = ocr_line[1:]
794
+ quad_box = [int(i) for i in quad_box]
795
+ quad_box = self.coordinates_quantizer.dequantize(
796
+ torch.tensor(np.array(quad_box).reshape(-1, 2)),
797
+ size=image_size
798
+ ).reshape(-1).tolist()
799
+
800
+ if area_threshold > 0:
801
+ x_coords = [i for i in quad_box[0::2]]
802
+ y_coords = [i for i in quad_box[1::2]]
803
+
804
+ # apply the Shoelace formula
805
+ area = 0.5 * abs(sum(x_coords[i] * y_coords[i + 1] - x_coords[i + 1] * y_coords[i] for i in range(4 - 1)))
806
+
807
+ if area < (image_width * image_height) * area_threshold:
808
+ continue
809
+
810
+ bboxes.append(quad_box)
811
+ labels.append(ocr_content)
812
+ instances.append({
813
+ 'quad_box': quad_box,
814
+ 'text': ocr_content,
815
+ })
816
+ return instances
817
+
818
+ def parse_phrase_grounding_from_text_and_spans(self, text, pattern, image_size):
819
+ # ignore <s> </s> and <pad>
820
+ cur_span = 0
821
+ if text.startswith('<s>'):
822
+ cur_span += 3
823
+
824
+ text = text.replace('<s>', '')
825
+ text = text.replace('</s>', '')
826
+ text = text.replace('<pad>', '')
827
+
828
+ pattern = r"([^<]+(?:<loc_\d+>){4,})"
829
+ phrases = re.findall(pattern, text)
830
+
831
+ # pattern should be text pattern and od pattern
832
+ pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
833
+ box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
834
+
835
+ instances = []
836
+ for pharse_text in phrases:
837
+ phrase_text_strip = pharse_text.replace('<ground>', '', 1)
838
+ phrase_text_strip = pharse_text.replace('<obj>', '', 1)
839
+
840
+ if phrase_text_strip == '':
841
+ cur_span += len(pharse_text)
842
+ continue
843
+
844
+ # Prepare instance.
845
+ instance = {}
846
+
847
+ # parse phrase, get string
848
+ phrase = re.search(pattern, phrase_text_strip)
849
+ if phrase is None:
850
+ cur_span += len(pharse_text)
851
+ continue
852
+
853
+ # parse bboxes by box_pattern
854
+ bboxes_parsed = list(re.finditer(box_pattern, pharse_text))
855
+ if len(bboxes_parsed) == 0:
856
+ cur_span += len(pharse_text)
857
+ continue
858
+
859
+ phrase = phrase.group()
860
+ # remove leading and trailing spaces
861
+ phrase = phrase.strip()
862
+
863
+ if phrase in self.black_list_of_phrase_grounding:
864
+ cur_span += len(pharse_text)
865
+ continue
866
+
867
+ # a list of list
868
+ bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
869
+ instance['bbox'] = self.box_quantizer.dequantize(
870
+ boxes=torch.tensor(bbox_bins),
871
+ size=image_size
872
+ ).tolist()
873
+
874
+ # exclude non-ascii characters
875
+ phrase = phrase.encode('ascii',errors='ignore').decode('ascii')
876
+ instance['cat_name'] = phrase
877
+
878
+ instances.append(instance)
879
+
880
+ return instances
881
+
+    def parse_description_with_bboxes_from_text_and_spans(self, text, pattern, image_size, allow_empty_phrase=False):
+        # temporary parse solution, split by '.'
+        # ignore <s> </s> and <pad>
+
+        text = text.replace('<s>', '')
+        text = text.replace('</s>', '')
+        text = text.replace('<pad>', '')
+
+        if allow_empty_phrase:
+            pattern = rf"(?:(?:<loc_\d+>){{4,}})"
+        else:
+            pattern = r"([^<]+(?:<loc_\d+>){4,})"
+        phrases = re.findall(pattern, text)
+
+        # pattern should be text pattern and od pattern
+        pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
+        box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
+
+        instances = []
+        for phrase_text in phrases:
+            # strip leading grounding tokens, if any
+            phrase_text_strip = phrase_text.replace('<ground>', '', 1)
+            phrase_text_strip = phrase_text_strip.replace('<obj>', '', 1)
+
+            if phrase_text_strip == '' and not allow_empty_phrase:
+                continue
+
+            # parse phrase, get string
+            phrase = re.search(pattern, phrase_text_strip)
+            if phrase is None:
+                continue
+
+            phrase = phrase.group()
+            # remove leading and trailing spaces
+            phrase = phrase.strip()
+
+            # parse bboxes by box_pattern
+            bboxes_parsed = list(re.finditer(box_pattern, phrase_text))
+            if len(bboxes_parsed) == 0:
+                continue
+
+            # a list of list
+            bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
+
+            bboxes = self.box_quantizer.dequantize(
+                boxes=torch.tensor(bbox_bins),
+                size=image_size
+            ).tolist()
+
+            # exclude non-ascii characters
+            phrase = phrase.encode('ascii', errors='ignore').decode('ascii')
+            for _bboxes in bboxes:
+                # Prepare instance.
+                instance = {}
+                instance['bbox'] = _bboxes
+                instance['cat_name'] = phrase
+                instances.append(instance)
+
+        return instances
+
+    def parse_description_with_polygons_from_text_and_spans(self, text, pattern, image_size,
+                                                            allow_empty_phrase=False,
+                                                            polygon_sep_token='<sep>',
+                                                            polygon_start_token='<poly>',
+                                                            polygon_end_token='</poly>',
+                                                            with_box_at_start=False,
+                                                            ):
+
+        # ref_seg format: '<expression><x1><y1><x2><y2><><><sep><><><><>'
+        # ignore <s> </s> and <pad>
+
+        text = text.replace('<s>', '')
+        text = text.replace('</s>', '')
+        text = text.replace('<pad>', '')
+
+        if allow_empty_phrase:
+            pattern = rf"(?:(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
+        else:
+            # [^<]+ matches one or more characters that are not the '<' symbol,
+            # i.e. the phrase text that precedes the location/polygon tokens.
+            pattern = rf"([^<]+(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
+        phrases = re.findall(pattern, text)
+
+        phrase_string_pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_|<poly>)'
+        box_pattern = rf'((?:<loc_\d+>)+)(?:{re.escape(polygon_sep_token)}|$)'
+
+        # one polygons instance is separated by polygon_start_token and polygon_end_token
+        polygons_instance_pattern = rf'{re.escape(polygon_start_token)}(.*?){re.escape(polygon_end_token)}'
+
+        instances = []
+        for phrase_text in phrases:
+
+            # exclude a leading 'loc_\d+>' fragment;
+            # need to get span if want to include category score
+            phrase_text_strip = re.sub(r'^loc_\d+>', '', phrase_text, count=1)
+
+            if phrase_text_strip == '' and not allow_empty_phrase:
+                continue
+
+            # parse phrase, get string
+            phrase = re.search(phrase_string_pattern, phrase_text_strip)
+            if phrase is None:
+                continue
+            phrase = phrase.group()
+            # remove leading and trailing spaces
+            phrase = phrase.strip()
+
+            # split by polygon_start_token and polygon_end_token first, using polygons_instance_pattern
+            if polygon_start_token in phrase_text and polygon_end_token in phrase_text:
+                polygons_instances_parsed = list(re.finditer(polygons_instance_pattern, phrase_text))
+            else:
+                polygons_instances_parsed = [phrase_text]
+
+            for _polygons_instances_parsed in polygons_instances_parsed:
+                # Prepare instance.
+                instance = {}
+
+                if isinstance(_polygons_instances_parsed, str):
+                    polygons_parsed = list(re.finditer(box_pattern, _polygons_instances_parsed))
+                else:
+                    polygons_parsed = list(re.finditer(box_pattern, _polygons_instances_parsed.group(1)))
+                if len(polygons_parsed) == 0:
+                    continue
+
+                # a list of list (polygon)
+                bbox = []
+                polygons = []
+                for _polygon_parsed in polygons_parsed:
+                    # group 1: the run of <loc_\d+> tokens for this polygon
+                    _polygon = _polygon_parsed.group(1)
+                    # parse into list of int
+                    _polygon = [int(_loc_parsed.group(1)) for _loc_parsed in re.finditer(r'<loc_(\d+)>', _polygon)]
+                    if with_box_at_start and len(bbox) == 0:
+                        if len(_polygon) > 4:
+                            # the first four values are the bounding box
+                            bbox = _polygon[:4]
+                            _polygon = _polygon[4:]
+                        else:
+                            # no valid bbox prediction
+                            bbox = [0, 0, 0, 0]
+                    # abandon last element if it is not paired
+                    if len(_polygon) % 2 == 1:
+                        _polygon = _polygon[:-1]
+
+                    # reshape into (n, 2), dequantize, then flatten back
+                    _polygon = self.coordinates_quantizer.dequantize(
+                        torch.tensor(np.array(_polygon).reshape(-1, 2)),
+                        size=image_size
+                    ).reshape(-1).tolist()
+                    polygons.append(_polygon)
+
+                instance['cat_name'] = phrase
+                instance['polygons'] = polygons
+                if len(bbox) != 0:
+                    instance['bbox'] = self.box_quantizer.dequantize(
+                        boxes=torch.tensor([bbox]),
+                        size=image_size
+                    ).tolist()[0]
+
+                instances.append(instance)
+
+        return instances
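A minimal sketch of how the polygon patterns above behave on a made-up output string (illustration only; the sample string is an assumption, the patterns are taken from the code):

    import re
    polygon_sep_token = '<sep>'
    box_pattern = rf'((?:<loc_\d+>)+)(?:{re.escape(polygon_sep_token)}|$)'
    sample = '<loc_1><loc_2><loc_3><loc_4><sep><loc_5><loc_6><loc_7><loc_8>'
    runs = [m.group(1) for m in re.finditer(box_pattern, sample)]
    # runs == ['<loc_1><loc_2><loc_3><loc_4>', '<loc_5><loc_6><loc_7><loc_8>'],
    # i.e. one coordinate run per polygon, split on '<sep>' or end of string.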
+
+    def __call__(
+        self,
+        text=None,
+        image_size=None,
+        parse_tasks=None,
+    ):
+        """
+        Args:
+            text: model output text to post-process
+            image_size: (width, height) of the input image
+            parse_tasks: a list of tasks to parse; if None, parse all supported tasks.
+        """
+        if parse_tasks is not None:
+            if isinstance(parse_tasks, str):
+                parse_tasks = [parse_tasks]
+            for _parse_task in parse_tasks:
+                assert _parse_task in self.parse_tasks, f'parse task {_parse_task} not supported'
+
+        # text should be provided
+        assert text is not None, 'text should be provided'
+
+        parsed_dict = {
+            'text': text
+        }
+
+        for task in self.parse_tasks:
+            if parse_tasks is not None and task not in parse_tasks:
+                continue
+
+            pattern = self.parse_tasks_configs[task].get('PATTERN', None)
+
+            if task == 'ocr':
+                instances = self.parse_ocr_from_text_and_spans(
+                    text,
+                    pattern=pattern,
+                    image_size=image_size,
+                    area_threshold=self.parse_tasks_configs[task].get('AREA_THRESHOLD', 0.0),
+                )
+                parsed_dict['ocr'] = instances
+            elif task == 'phrase_grounding':
+                instances = self.parse_phrase_grounding_from_text_and_spans(
+                    text,
+                    pattern=pattern,
+                    image_size=image_size,
+                )
+                parsed_dict['phrase_grounding'] = instances
+            elif task == 'pure_text':
+                parsed_dict['pure_text'] = text
+            elif task == 'description_with_bboxes':
+                instances = self.parse_description_with_bboxes_from_text_and_spans(
+                    text,
+                    pattern=pattern,
+                    image_size=image_size,
+                )
+                parsed_dict['description_with_bboxes'] = instances
+            elif task == 'description_with_polygons':
+                instances = self.parse_description_with_polygons_from_text_and_spans(
+                    text,
+                    pattern=pattern,
+                    image_size=image_size,
+                )
+                parsed_dict['description_with_polygons'] = instances
+            elif task == 'polygons':
+                instances = self.parse_description_with_polygons_from_text_and_spans(
+                    text,
+                    pattern=pattern,
+                    image_size=image_size,
+                    allow_empty_phrase=True,
+                )
+                parsed_dict['polygons'] = instances
+            elif task == 'bboxes':
+                instances = self.parse_description_with_bboxes_from_text_and_spans(
+                    text,
+                    pattern=pattern,
+                    image_size=image_size,
+                    allow_empty_phrase=True,
+                )
+                parsed_dict['bboxes'] = instances
+            elif task == 'description_with_bboxes_or_polygons':
+                # only support either polygons or bboxes, not both at the same time
+                if '<poly>' in text:
+                    instances = self.parse_description_with_polygons_from_text_and_spans(
+                        text,
+                        pattern=pattern,
+                        image_size=image_size,
+                    )
+                else:
+                    instances = self.parse_description_with_bboxes_from_text_and_spans(
+                        text,
+                        pattern=pattern,
+                        image_size=image_size,
+                    )
+                parsed_dict['description_with_bboxes_or_polygons'] = instances
+            else:
+                raise ValueError("task {} is not supported".format(task))
+
+        return parsed_dict
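A minimal usage sketch of the post-processor's `__call__` defined above. Only the call signature and task name come from the code; the `post_processor` instance, the generated text, and the image size are assumptions for illustration:

    # `post_processor` is assumed to be an already-constructed instance of the
    # post-processing class whose __call__ is shown above.
    generated_text = 'a photo of a dog<loc_52><loc_334><loc_932><loc_774>'
    parsed = post_processor(
        text=generated_text,
        image_size=(768, 1024),  # (width, height) of the original image
        parse_tasks='description_with_bboxes',
    )
    # parsed['description_with_bboxes'] -> [{'bbox': [...], 'cat_name': 'a photo of a dog'}]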
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a78b12e68bb5c066b722b60edc0468025c45026d0fcfa2c6bb1cbc6b016f69f3
+ size 1455932782
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "model_max_length": 1024
+ }
+
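The uploaded tokenizer_config.json only pins `model_max_length` to 1024. A minimal sketch of reading that value directly from the file (the file path is assumed to be the repo root):

    import json

    with open('tokenizer_config.json') as f:
        cfg = json.load(f)
    print(cfg['model_max_length'])  # 1024
    # when loaded with transformers, this value becomes tokenizer.model_max_length,
    # so longer inputs are expected to be truncated (e.g. tokenizer(text, truncation=True)).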
vocab.json ADDED
The diff for this file is too large to render. See raw diff