drlon committed on
Commit
dc6b3d4
·
1 Parent(s): d4277d4

update all

Files changed (9)
  1. app.py +282 -52
  2. requirements.txt +15 -1
  3. util/__init__.py +0 -0
  4. util/arial.ttf +0 -0
  5. util/box_annotator.py +262 -0
  6. util/omniparser.py +32 -0
  7. util/process_utils.py +48 -0
  8. util/som.py +348 -0
  9. util/utils.py +542 -0
app.py CHANGED
@@ -1,64 +1,294 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
9
 
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
 
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
 
26
- messages.append({"role": "user", "content": message})
27
 
28
- response = ""
 
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
41
 
 
 
 
42

43
  """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
-
62
-
63
- if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ import spaces
3
  import gradio as gr
4
+ import numpy as np
5
+ import torch
6
+ from PIL import Image
7
+ import io
8
+ import re
9
 
10
+ import base64, os
11
+ from util.utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
12
+ from util.som import MarkHelper, plot_boxes_with_marks, plot_circles_with_marks
13
+ from util.process_utils import pred_2_point, extract_bbox, extract_mark_id
14
 
15
+ import torch
16
+ from PIL import Image
17
 
18
+ from huggingface_hub import snapshot_download
19
+ import torch
20
+ from transformers import AutoModelForCausalLM
21
+ from transformers import AutoProcessor
22
 
23
+ # Define repository and local directory
24
+ repo_id = "microsoft/OmniParser-v2.0" # HF repo
25
+ local_dir = "weights" # Target local directory
 
 
26
 
27
+ som_generator = MarkHelper()
28
+ magma_som_prompt = "<image>\nIn this view I need to click a button to \"{}\"? Provide the coordinates and the mark index of the containing bounding box if applicable."
29
+ magma_qa_prompt = "<image>\n{} Answer the question briefly."
30
+ magma_model_id = "microsoft/Magma-8B"
31
+ magam_model = AutoModelForCausalLM.from_pretrained(magma_model_id, trust_remote_code=True)
32
+ magma_processor = AutoProcessor.from_pretrained(magma_model_id, trust_remote_code=True)
33
+ magam_model.to("cuda")
34
 
35
+ # Download the entire repository
36
+ snapshot_download(repo_id=repo_id, local_dir=local_dir)
37
 
38
+ print(f"Repository downloaded to: {local_dir}")
 
 
 
 
 
 
 
39
 
 
 
40
 
41
+ yolo_model = get_yolo_model(model_path='weights/icon_detect/model.pt')
42
+ caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption")
43
+ # caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")
44
 
45
+ MARKDOWN = """
46
+ <div align="center">
47
+ <h2>Magma: A Foundation Model for Multimodal AI Agents</h2>
48
+
49
+ [Jianwei Yang](https://jwyang.github.io/)<sup>*</sup><sup>1</sup><sup>†</sup>&nbsp;
50
+ [Reuben Tan](https://cs-people.bu.edu/rxtan/)<sup>1</sup><sup>†</sup>&nbsp;
51
+ [Qianhui Wu](https://qianhuiwu.github.io/)<sup>1</sup><sup>†</sup>&nbsp;
52
+ [Ruijie Zheng](https://ruijiezheng.com/)<sup>2</sup><sup>‡</sup>&nbsp;
53
+ [Baolin Peng](https://scholar.google.com/citations?user=u1CNjgwAAAAJ&hl=en&oi=ao)<sup>1</sup><sup>‡</sup>&nbsp;
54
+ [Yongyuan Liang](https://cheryyunl.github.io)<sup>2</sup><sup>‡</sup>
55
+ [Yu Gu](https://users.umiacs.umd.edu/~hal/)<sup>1</sup>&nbsp;
56
+ [Mu Cai](https://pages.cs.wisc.edu/~mucai/)<sup>3</sup>&nbsp;
57
+ [Seonghyeon Ye](https://seonghyeonye.github.io/)<sup>4</sup>&nbsp;
58
+ [Joel Jang](https://joeljang.github.io/)<sup>5</sup>&nbsp;
59
+ [Yuquan Deng](https://scholar.google.com/citations?user=LTC0Q6YAAAAJ&hl=en)<sup>5</sup>&nbsp;
60
+ [Lars Liden](https://sites.google.com/site/larsliden)<sup>1</sup>&nbsp;
61
+ [Jianfeng Gao](https://www.microsoft.com/en-us/research/people/jfgao/)<sup>1</sup><sup>▽</sup>
62
+
63
+ <sup>1</sup> Microsoft Research; <sup>2</sup> University of Maryland; <sup>3</sup> University of Wisconsin-Madison; <sup>4</sup> KAIST; <sup>5</sup> University of Washington
64
+
65
+ <sup>*</sup> Project lead <sup>†</sup> First authors <sup>‡</sup> Second authors <sup>▽</sup> Leadership
66
+
67
+ \[[arXiv Paper](https://www.arxiv.org/pdf/2502.13130)\] &nbsp; \[[Project Page](https://microsoft.github.io/Magma/)\] &nbsp; \[[Github Repo](https://github.com/microsoft/Magma)\] &nbsp; \[[Hugging Face Model](https://huggingface.co/microsoft/Magma-8B)\] &nbsp;
68
+
69
+ This demo is powered by [Gradio](https://gradio.app/) and uses OmniParserv2 to generate Set-of-Mark prompts.
70
+ </div>
71
  """
72
+
73
+ DEVICE = torch.device('cuda')
74
+
75
+ @spaces.GPU
76
+ @torch.inference_mode()
77
+ def get_som_response(instruction, image_som):
78
+ prompt = magma_som_prompt.format(instruction)
79
+ if magam_model.config.mm_use_image_start_end:
80
+ qs = prompt.replace('<image>', '<image_start><image><image_end>')
81
+ else:
82
+ qs = prompt
83
+ convs = [{"role": "user", "content": qs}]
84
+ convs = [{"role": "system", "content": "You are agent that can see, talk and act."}] + convs
85
+ prompt = magma_processor.tokenizer.apply_chat_template(
86
+ convs,
87
+ tokenize=False,
88
+ add_generation_prompt=True
89
+ )
90
+
91
+ inputs = magma_processor(images=[image_som], texts=prompt, return_tensors="pt")
92
+ inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
93
+ inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
94
+ inputs = inputs.to("cuda")
95
+
96
+ magam_model.generation_config.pad_token_id = magma_processor.tokenizer.pad_token_id
97
+ with torch.inference_mode():
98
+ output_ids = magam_model.generate(
99
+ **inputs,
100
+ temperature=0.0,
101
+ do_sample=False,
102
+ num_beams=1,
103
+ max_new_tokens=128,
104
+ use_cache=True
105
+ )
106
+
107
+ prompt_decoded = magma_processor.batch_decode(inputs['input_ids'], skip_special_tokens=True)[0]
108
+ response = magma_processor.batch_decode(output_ids, skip_special_tokens=True)[0]
109
+ response = response.replace(prompt_decoded, '').strip()
110
+ return response
111
+
112
+ @spaces.GPU
113
+ @torch.inference_mode()
114
+ def get_qa_response(instruction, image):
115
+ prompt = magma_qa_prompt.format(instruction)
116
+ if magam_model.config.mm_use_image_start_end:
117
+ qs = prompt.replace('<image>', '<image_start><image><image_end>')
118
+ else:
119
+ qs = prompt
120
+ convs = [{"role": "user", "content": qs}]
121
+ convs = [{"role": "system", "content": "You are agent that can see, talk and act."}] + convs
122
+ prompt = magma_processor.tokenizer.apply_chat_template(
123
+ convs,
124
+ tokenize=False,
125
+ add_generation_prompt=True
126
+ )
127
+
128
+ inputs = magma_processor(images=[image], texts=prompt, return_tensors="pt")
129
+ inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
130
+ inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
131
+ inputs = inputs.to("cuda")
132
+
133
+ magam_model.generation_config.pad_token_id = magma_processor.tokenizer.pad_token_id
134
+ with torch.inference_mode():
135
+ output_ids = magam_model.generate(
136
+ **inputs,
137
+ temperature=0.0,
138
+ do_sample=False,
139
+ num_beams=1,
140
+ max_new_tokens=128,
141
+ use_cache=True
142
+ )
143
+
144
+ prompt_decoded = magma_processor.batch_decode(inputs['input_ids'], skip_special_tokens=True)[0]
145
+ response = magma_processor.batch_decode(output_ids, skip_special_tokens=True)[0]
146
+ response = response.replace(prompt_decoded, '').strip()
147
+ return response
148
+
149
+ @spaces.GPU
150
+ @torch.inference_mode()
151
+ # @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
152
+ def process(
153
+ image_input,
154
+ box_threshold,
155
+ iou_threshold,
156
+ use_paddleocr,
157
+ imgsz,
158
+ instruction,
159
+ ) -> Optional[Image.Image]:
160
+
161
+ # image_save_path = 'imgs/saved_image_demo.png'
162
+ # image_input.save(image_save_path)
163
+ # image = Image.open(image_save_path)
164
+ box_overlay_ratio = image_input.size[0] / 3200
165
+ draw_bbox_config = {
166
+ 'text_scale': 0.8 * box_overlay_ratio,
167
+ 'text_thickness': max(int(2 * box_overlay_ratio), 1),
168
+ 'text_padding': max(int(3 * box_overlay_ratio), 1),
169
+ 'thickness': max(int(3 * box_overlay_ratio), 1),
170
+ }
171
+
172
+ ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_input, display_img = False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold':0.9}, use_paddleocr=use_paddleocr)
173
+ text, ocr_bbox = ocr_bbox_rslt
174
+ dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_input, yolo_model, BOX_TRESHOLD = box_threshold, output_coord_in_ratio=False, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text,iou_threshold=iou_threshold, imgsz=imgsz,)
175
+ parsed_content_list = '\n'.join([f'icon {i}: ' + str(v) for i,v in enumerate(parsed_content_list)])
176
+
177
+ if len(instruction) == 0:
178
+ print('finish processing')
179
+ image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))
180
+ return image, str(parsed_content_list)
181
+
182
+ elif instruction.startswith('Q:'):
183
+ response = get_qa_response(instruction, image_input)
184
+ return image_input, response
185
+
186
+ # parsed_content_list = str(parsed_content_list)
187
+ # convert xywh to yxhw
188
+ label_coordinates_yxhw = {}
189
+ for key, val in label_coordinates.items():
190
+ if val[2] < 0 or val[3] < 0:
191
+ continue
192
+ label_coordinates_yxhw[key] = [val[1], val[0], val[3], val[2]]
193
+ image_som = plot_boxes_with_marks(image_input.copy(), [val for key, val in label_coordinates_yxhw.items()], som_generator, edgecolor=(255,0,0), fn_save=None, normalized_to_pixel=False)
194
+
195
+ # convert xywh to xyxy
196
+ for key, val in label_coordinates.items():
197
+ label_coordinates[key] = [val[0], val[1], val[0] + val[2], val[1] + val[3]]
198
+
199
+ # normalize label_coordinates
200
+ for key, val in label_coordinates.items():
201
+ label_coordinates[key] = [val[0] / image_input.size[0], val[1] / image_input.size[1], val[2] / image_input.size[0], val[3] / image_input.size[1]]
202
+
203
+ magma_response = get_som_response(instruction, image_som)
204
+ print("magma repsonse: ", magma_response)
205
+
206
+ # map magma_response into the mark id
207
+ mark_id = extract_mark_id(magma_response)
208
+ if mark_id is not None:
209
+ if str(mark_id) in label_coordinates:
210
+ bbox_for_mark = label_coordinates[str(mark_id)]
211
+ else:
212
+ bbox_for_mark = None
213
+ else:
214
+ bbox_for_mark = None
215
+
216
+ if bbox_for_mark:
217
+ # draw bbox_for_mark on the image
218
+ image_som = plot_boxes_with_marks(
219
+ image_input,
220
+ [label_coordinates_yxhw[str(mark_id)]],
221
+ som_generator,
222
+ edgecolor=(255,127,111),
223
+ alpha=30,
224
+ fn_save=None,
225
+ normalized_to_pixel=False,
226
+ add_mark=False
227
+ )
228
+ else:
229
+ try:
230
+ if 'box' in magma_response:
231
+ pred_bbox = extract_bbox(magma_response)
232
+ click_point = [(pred_bbox[0][0] + pred_bbox[1][0]) / 2, (pred_bbox[0][1] + pred_bbox[1][1]) / 2]
233
+ click_point = [item / 1000 for item in click_point]
234
+ else:
235
+ click_point = pred_2_point(magma_response)
236
+ # de-normalize click_point (width, height)
237
+ click_point = [click_point[0] * image_input.size[0], click_point[1] * image_input.size[1]]
238
+
239
+ image_som = plot_circles_with_marks(
240
+ image_input,
241
+ [click_point],
242
+ som_generator,
243
+ edgecolor=(255,127,111),
244
+ linewidth=3,
245
+ fn_save=None,
246
+ normalized_to_pixel=False,
247
+ add_mark=False
248
+ )
249
+ except:
250
+ image_som = image_input
251
+
252
+ return image_som, str(parsed_content_list)
253
+
254
+ with gr.Blocks() as demo:
255
+ gr.Markdown(MARKDOWN)
256
+ with gr.Row():
257
+ with gr.Column():
258
+ image_input_component = gr.Image(
259
+ type='pil', label='Upload image')
260
+ # set the threshold for removing the bounding boxes with low confidence, default is 0.05
261
+ with gr.Accordion("Parameters", open=False) as parameter_row:
262
+ box_threshold_component = gr.Slider(
263
+ label='Box Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.05)
264
+ # set the threshold for removing the bounding boxes with large overlap, default is 0.1
265
+ iou_threshold_component = gr.Slider(
266
+ label='IOU Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.1)
267
+ use_paddleocr_component = gr.Checkbox(
268
+ label='Use PaddleOCR', value=True)
269
+ imgsz_component = gr.Slider(
270
+ label='Icon Detect Image Size', minimum=640, maximum=1920, step=32, value=640)
271
+ # text box
272
+ text_input_component = gr.Textbox(label='Text Input', placeholder='Text Input')
273
+ submit_button_component = gr.Button(
274
+ value='Submit', variant='primary')
275
+ with gr.Column():
276
+ image_output_component = gr.Image(type='pil', label='Image Output')
277
+ text_output_component = gr.Textbox(label='Parsed screen elements', placeholder='Text Output')
278
+
279
+ submit_button_component.click(
280
+ fn=process,
281
+ inputs=[
282
+ image_input_component,
283
+ box_threshold_component,
284
+ iou_threshold_component,
285
+ use_paddleocr_component,
286
+ imgsz_component,
287
+ text_input_component
288
+ ],
289
+ outputs=[image_output_component, text_output_component]
290
+ )
291
+
292
+ # demo.launch(debug=False, show_error=True, share=True)
293
+ # demo.launch(share=True, server_port=7861, server_name='0.0.0.0')
294
+ demo.queue().launch(share=False)
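
For reference, a minimal sketch (illustrative numbers, not part of the commit) of what the grounding branch in `process` does once Magma has answered: the mark id in the response is mapped to a normalized box in `label_coordinates`, which can then be turned back into pixel coordinates. Taking the box center as a click target is an assumption here; the demo itself re-draws the matched box instead.

```python
from util.process_utils import extract_mark_id

# hypothetical model output and a normalized (x1, y1, x2, y2) box keyed by mark id,
# matching the format process() builds before calling get_som_response()
magma_response = "The button is inside Mark: 3"
label_coordinates = {'3': [0.10, 0.20, 0.30, 0.40]}
image_w, image_h = 1920, 1080

mark_id = extract_mark_id(magma_response)         # -> 3
x1, y1, x2, y2 = label_coordinates[str(mark_id)]
click_x = (x1 + x2) / 2 * image_w                 # -> 384.0
click_y = (y1 + y2) / 2 * image_h                 # -> 324.0
```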
requirements.txt CHANGED
@@ -1 +1,15 @@
1
- huggingface_hub==0.25.2
1
+ huggingface_hub
2
+ spaces
3
+ gradio
4
+ numpy
5
+ torch
6
+ Pillow
7
+ # `re` is part of the Python standard library; no separate install is needed
8
+ ultralytics
9
+ transformers
10
+ opencv-python
11
+ supervision
12
+ easyocr
13
+ paddleocr
14
+ requests
15
+ networkx
util/__init__.py ADDED
File without changes
util/arial.ttf ADDED
Binary file (312 kB)
 
util/box_annotator.py ADDED
@@ -0,0 +1,262 @@
1
+ from typing import List, Optional, Union, Tuple
2
+
3
+ import cv2
4
+ import numpy as np
5
+
6
+ from supervision.detection.core import Detections
7
+ from supervision.draw.color import Color, ColorPalette
8
+
9
+
10
+ class BoxAnnotator:
11
+ """
12
+ A class for drawing bounding boxes on an image using detections provided.
13
+
14
+ Attributes:
15
+ color (Union[Color, ColorPalette]): The color to draw the bounding box,
16
+ can be a single color or a color palette
17
+ thickness (int): The thickness of the bounding box lines, default is 2
18
+ text_color (Color): The color of the text on the bounding box, default is white
19
+ text_scale (float): The scale of the text on the bounding box, default is 0.5
20
+ text_thickness (int): The thickness of the text on the bounding box,
21
+ default is 1
22
+ text_padding (int): The padding around the text on the bounding box,
23
+ default is 5
24
+
25
+ """
26
+
27
+ def __init__(
28
+ self,
29
+ color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
30
+ thickness: int = 3, # 1 for seeclick 2 for mind2web and 3 for demo
31
+ text_color: Color = Color.BLACK,
32
+ text_scale: float = 0.5, # 0.8 for mobile/web, 0.3 for desktop # 0.4 for mind2web
33
+ text_thickness: int = 2, #1, # 2 for demo
34
+ text_padding: int = 10,
35
+ avoid_overlap: bool = True,
36
+ ):
37
+ self.color: Union[Color, ColorPalette] = color
38
+ self.thickness: int = thickness
39
+ self.text_color: Color = text_color
40
+ self.text_scale: float = text_scale
41
+ self.text_thickness: int = text_thickness
42
+ self.text_padding: int = text_padding
43
+ self.avoid_overlap: bool = avoid_overlap
44
+
45
+ def annotate(
46
+ self,
47
+ scene: np.ndarray,
48
+ detections: Detections,
49
+ labels: Optional[List[str]] = None,
50
+ skip_label: bool = False,
51
+ image_size: Optional[Tuple[int, int]] = None,
52
+ ) -> np.ndarray:
53
+ """
54
+ Draws bounding boxes on the frame using the detections provided.
55
+
56
+ Args:
57
+ scene (np.ndarray): The image on which the bounding boxes will be drawn
58
+ detections (Detections): The detections for which the
59
+ bounding boxes will be drawn
60
+ labels (Optional[List[str]]): An optional list of labels
61
+ corresponding to each detection. If `labels` are not provided,
62
+ corresponding `class_id` will be used as label.
63
+ skip_label (bool): If set to `True`, skips bounding box label annotation.
64
+ Returns:
65
+ np.ndarray: The image with the bounding boxes drawn on it
66
+
67
+ Example:
68
+ ```python
69
+ import supervision as sv
70
+
71
+ classes = ['person', ...]
72
+ image = ...
73
+ detections = sv.Detections(...)
74
+
75
+ box_annotator = sv.BoxAnnotator()
76
+ labels = [
77
+ f"{classes[class_id]} {confidence:0.2f}"
78
+ for _, _, confidence, class_id, _ in detections
79
+ ]
80
+ annotated_frame = box_annotator.annotate(
81
+ scene=image.copy(),
82
+ detections=detections,
83
+ labels=labels
84
+ )
85
+ ```
86
+ """
87
+ font = cv2.FONT_HERSHEY_SIMPLEX
88
+ for i in range(len(detections)):
89
+ x1, y1, x2, y2 = detections.xyxy[i].astype(int)
90
+ class_id = (
91
+ detections.class_id[i] if detections.class_id is not None else None
92
+ )
93
+ idx = class_id if class_id is not None else i
94
+ color = (
95
+ self.color.by_idx(idx)
96
+ if isinstance(self.color, ColorPalette)
97
+ else self.color
98
+ )
99
+ cv2.rectangle(
100
+ img=scene,
101
+ pt1=(x1, y1),
102
+ pt2=(x2, y2),
103
+ color=color.as_bgr(),
104
+ thickness=self.thickness,
105
+ )
106
+ if skip_label:
107
+ continue
108
+
109
+ text = (
110
+ f"{class_id}"
111
+ if (labels is None or len(detections) != len(labels))
112
+ else labels[i]
113
+ )
114
+
115
+ text_width, text_height = cv2.getTextSize(
116
+ text=text,
117
+ fontFace=font,
118
+ fontScale=self.text_scale,
119
+ thickness=self.text_thickness,
120
+ )[0]
121
+
122
+ if not self.avoid_overlap:
123
+ text_x = x1 + self.text_padding
124
+ text_y = y1 - self.text_padding
125
+
126
+ text_background_x1 = x1
127
+ text_background_y1 = y1 - 2 * self.text_padding - text_height
128
+
129
+ text_background_x2 = x1 + 2 * self.text_padding + text_width
130
+ text_background_y2 = y1
131
+ # text_x = x1 - self.text_padding - text_width
132
+ # text_y = y1 + self.text_padding + text_height
133
+ # text_background_x1 = x1 - 2 * self.text_padding - text_width
134
+ # text_background_y1 = y1
135
+ # text_background_x2 = x1
136
+ # text_background_y2 = y1 + 2 * self.text_padding + text_height
137
+ else:
138
+ text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2 = get_optimal_label_pos(self.text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size)
139
+
140
+ cv2.rectangle(
141
+ img=scene,
142
+ pt1=(text_background_x1, text_background_y1),
143
+ pt2=(text_background_x2, text_background_y2),
144
+ color=color.as_bgr(),
145
+ thickness=cv2.FILLED,
146
+ )
147
+ # import pdb; pdb.set_trace()
148
+ box_color = color.as_rgb()
149
+ luminance = 0.299 * box_color[0] + 0.587 * box_color[1] + 0.114 * box_color[2]
150
+ text_color = (0,0,0) if luminance > 160 else (255,255,255)
151
+ cv2.putText(
152
+ img=scene,
153
+ text=text,
154
+ org=(text_x, text_y),
155
+ fontFace=font,
156
+ fontScale=self.text_scale,
157
+ # color=self.text_color.as_rgb(),
158
+ color=text_color,
159
+ thickness=self.text_thickness,
160
+ lineType=cv2.LINE_AA,
161
+ )
162
+ return scene
163
+
164
+
165
+ def box_area(box):
166
+ return (box[2] - box[0]) * (box[3] - box[1])
167
+
168
+ def intersection_area(box1, box2):
169
+ x1 = max(box1[0], box2[0])
170
+ y1 = max(box1[1], box2[1])
171
+ x2 = min(box1[2], box2[2])
172
+ y2 = min(box1[3], box2[3])
173
+ return max(0, x2 - x1) * max(0, y2 - y1)
174
+
175
+ def IoU(box1, box2, return_max=True):
176
+ intersection = intersection_area(box1, box2)
177
+ union = box_area(box1) + box_area(box2) - intersection
178
+ if box_area(box1) > 0 and box_area(box2) > 0:
179
+ ratio1 = intersection / box_area(box1)
180
+ ratio2 = intersection / box_area(box2)
181
+ else:
182
+ ratio1, ratio2 = 0, 0
183
+ if return_max:
184
+ return max(intersection / union, ratio1, ratio2)
185
+ else:
186
+ return intersection / union
187
+
188
+
189
+ def get_optimal_label_pos(text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size):
190
+ """ check overlap of text and background detection box, and get_optimal_label_pos,
191
+ pos: str, position of the text, must be one of 'top left', 'top right', 'outer left', 'outer right' TODO: if all are overlapping, return the last one, i.e. outer right
192
+ Threshold: default to 0.3
193
+ """
194
+
195
+ def get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size):
196
+ is_overlap = False
197
+ for i in range(len(detections)):
198
+ detection = detections.xyxy[i].astype(int)
199
+ if IoU([text_background_x1, text_background_y1, text_background_x2, text_background_y2], detection) > 0.3:
200
+ is_overlap = True
201
+ break
202
+ # check if the text is out of the image
203
+ if text_background_x1 < 0 or text_background_x2 > image_size[0] or text_background_y1 < 0 or text_background_y2 > image_size[1]:
204
+ is_overlap = True
205
+ return is_overlap
206
+
207
+ # if pos == 'top left':
208
+ text_x = x1 + text_padding
209
+ text_y = y1 - text_padding
210
+
211
+ text_background_x1 = x1
212
+ text_background_y1 = y1 - 2 * text_padding - text_height
213
+
214
+ text_background_x2 = x1 + 2 * text_padding + text_width
215
+ text_background_y2 = y1
216
+ is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
217
+ if not is_overlap:
218
+ return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
219
+
220
+ # elif pos == 'outer left':
221
+ text_x = x1 - text_padding - text_width
222
+ text_y = y1 + text_padding + text_height
223
+
224
+ text_background_x1 = x1 - 2 * text_padding - text_width
225
+ text_background_y1 = y1
226
+
227
+ text_background_x2 = x1
228
+ text_background_y2 = y1 + 2 * text_padding + text_height
229
+ is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
230
+ if not is_overlap:
231
+ return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
232
+
233
+
234
+ # elif pos == 'outer right':
235
+ text_x = x2 + text_padding
236
+ text_y = y1 + text_padding + text_height
237
+
238
+ text_background_x1 = x2
239
+ text_background_y1 = y1
240
+
241
+ text_background_x2 = x2 + 2 * text_padding + text_width
242
+ text_background_y2 = y1 + 2 * text_padding + text_height
243
+
244
+ is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
245
+ if not is_overlap:
246
+ return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
247
+
248
+ # elif pos == 'top right':
249
+ text_x = x2 - text_padding - text_width
250
+ text_y = y1 - text_padding
251
+
252
+ text_background_x1 = x2 - 2 * text_padding - text_width
253
+ text_background_y1 = y1 - 2 * text_padding - text_height
254
+
255
+ text_background_x2 = x2
256
+ text_background_y2 = y1
257
+
258
+ is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
259
+ if not is_overlap:
260
+ return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
261
+
262
+ return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
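
The `IoU` helper above intentionally differs from the textbook definition when `return_max=True`: it also considers the per-box overlap ratios, so containment counts as full overlap. A minimal sketch, assuming the module is importable from the repo root:

```python
from util.box_annotator import IoU

small = [10, 10, 20, 20]   # x1, y1, x2, y2
large = [0, 0, 100, 100]

print(IoU(small, large))                     # -> 1.0  (small box fully inside the large one)
print(IoU(small, large, return_max=False))   # -> 0.01 (plain intersection over union)
```

This is why `get_optimal_label_pos` treats a label box that sits entirely inside a detection as overlapping, even when the plain IoU is tiny.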
util/omniparser.py ADDED
@@ -0,0 +1,32 @@
1
+ from util.utils import get_som_labeled_img, get_caption_model_processor, get_yolo_model, check_ocr_box
2
+ import torch
3
+ from PIL import Image
4
+ import io
5
+ import base64
6
+ from typing import Dict
7
+ class Omniparser(object):
8
+ def __init__(self, config: Dict):
9
+ self.config = config
10
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
11
+
12
+ self.som_model = get_yolo_model(model_path=config['som_model_path'])
13
+ self.caption_model_processor = get_caption_model_processor(model_name=config['caption_model_name'], model_name_or_path=config['caption_model_path'], device=device)
14
+ print('Omniparser initialized!!!')
15
+
16
+ def parse(self, image_base64: str):
17
+ image_bytes = base64.b64decode(image_base64)
18
+ image = Image.open(io.BytesIO(image_bytes))
19
+ print('image size:', image.size)
20
+
21
+ box_overlay_ratio = max(image.size) / 3200
22
+ draw_bbox_config = {
23
+ 'text_scale': 0.8 * box_overlay_ratio,
24
+ 'text_thickness': max(int(2 * box_overlay_ratio), 1),
25
+ 'text_padding': max(int(3 * box_overlay_ratio), 1),
26
+ 'thickness': max(int(3 * box_overlay_ratio), 1),
27
+ }
28
+
29
+ (text, ocr_bbox), _ = check_ocr_box(image, display_img=False, output_bb_format='xyxy', easyocr_args={'text_threshold': 0.8}, use_paddleocr=False)
30
+ dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image, self.som_model, BOX_TRESHOLD = self.config['BOX_TRESHOLD'], output_coord_in_ratio=True, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=self.caption_model_processor, ocr_text=text,use_local_semantics=True, iou_threshold=0.7, scale_img=False, batch_size=128)
31
+
32
+ return dino_labled_img, parsed_content_list
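
A minimal usage sketch for the wrapper above; the config keys are the ones read in `__init__`/`parse`, while the weight paths and the screenshot filename are assumptions (they mirror the paths used in `app.py`):

```python
import base64
from util.omniparser import Omniparser

config = {
    'som_model_path': 'weights/icon_detect/model.pt',   # assumed, as in app.py
    'caption_model_name': 'florence2',
    'caption_model_path': 'weights/icon_caption',       # assumed, as in app.py
    'BOX_TRESHOLD': 0.05,
}
parser = Omniparser(config)

with open('screenshot.png', 'rb') as f:                 # hypothetical input image
    image_base64 = base64.b64encode(f.read()).decode('utf-8')

labeled_img_b64, parsed_content_list = parser.parse(image_base64)
```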
util/process_utils.py ADDED
@@ -0,0 +1,48 @@
1
+ import re
2
+
3
+ # is instruction English
4
+ def is_english_simple(text):
5
+ try:
6
+ text.encode(encoding='utf-8').decode('ascii')
7
+ except UnicodeDecodeError:
8
+ return False
9
+ else:
10
+ return True
11
+
12
+ # bbox -> point (str)
13
+ def bbox_2_point(bbox, dig=2):
14
+ # bbox [left, top, right, bottom]
15
+ point = [(bbox[0]+bbox[2])/2, (bbox[1]+bbox[3])/2]
16
+ point = [f"{item:.2f}" for item in point]
17
+ point_str = "({},{})".format(point[0], point[1])
18
+ return point_str
19
+
20
+ # bbox -> bbox (str)
21
+ def bbox_2_bbox(bbox, dig=2):
22
+ bbox = [f"{item:.2f}" for item in bbox]
23
+ bbox_str = "({},{},{},{})".format(bbox[0], bbox[1], bbox[2], bbox[3])
24
+ return bbox_str
25
+
26
+ # point (str) -> point
27
+ def pred_2_point(s):
28
+ floats = re.findall(r'-?\d+\.?\d*', s)
29
+ floats = [float(num) for num in floats]
30
+ if len(floats) == 2:
31
+ click_point = floats
32
+ elif len(floats) == 4:
33
+ click_point = [(floats[0]+floats[2])/2, (floats[1]+floats[3])/2]
34
+ return click_point
35
+
36
+ # bbox (qwen str) -> bbox
37
+ def extract_bbox(s):
38
+ # Regular expression to find the content inside <box> and </box>
39
+ pattern = r"<box>\((\d+,\d+)\),\((\d+,\d+)\)</box>"
40
+ matches = re.findall(pattern, s)
41
+ # Convert the tuples of strings into tuples of integers
42
+ return [(int(x.split(',')[0]), int(x.split(',')[1])) for x in sum(matches, ())]
43
+
44
+ def extract_mark_id(s):
45
+ match = re.search(r'Mark: (\d+)', s)
46
+ if match:
47
+ return int(match.group(1))
48
+ return None
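
A quick sketch of what these parsers accept and return; the sample strings below are illustrative, not actual Magma outputs:

```python
from util.process_utils import pred_2_point, extract_bbox, extract_mark_id

print(extract_mark_id("Mark: 7"))                       # -> 7
print(pred_2_point("(0.42, 0.81)"))                     # -> [0.42, 0.81]
print(pred_2_point("(0.10, 0.20, 0.30, 0.40)"))         # -> midpoint of the box, ~[0.2, 0.3]
print(extract_bbox("<box>(120,340),(560,780)</box>"))   # -> [(120, 340), (560, 780)]
```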
util/som.py ADDED
@@ -0,0 +1,348 @@
1
+ import torch
2
+ from ultralytics import YOLO
3
+ from PIL import Image
4
+ import io
5
+ import base64
6
+ device = 'cuda'
7
+
8
+ from PIL import Image, ImageDraw, ImageFont
9
+ import numpy as np
10
+ import networkx as nx
11
+ # import cv2
12
+
13
+ font_path = "agents/ui_agent/util/arial.ttf"
14
+ class MarkHelper:
15
+ def __init__(self):
16
+ self.markSize_dict = {}
17
+ self.font_dict = {}
18
+ self.min_font_size = 20 # 1 in v1
19
+ self.max_font_size = 30
20
+ self.max_font_proportion = 0.04 # 0.032 in v1
21
+
22
+ def __get_markSize(self, text, image_height, image_width, font):
23
+ im = Image.new('RGB', (image_width, image_height))
24
+ draw = ImageDraw.Draw(im)
25
+ _, _, width, height = draw.textbbox((0, 0), text=text, font=font)
26
+ return height, width
27
+
28
+ def _setup_new_font(self, image_height, image_width):
29
+ key = f"{image_height}_{image_width}"
30
+ # print(f"Setting up new font for image size: {key}")
31
+
32
+ # setup the font
33
+ fontsize = self.min_font_size
34
+ font = ImageFont.truetype(font_path, fontsize)
35
+ # font = ImageFont.load_default(size=fontsize)
36
+ while min(self.__get_markSize("555", image_height, image_width, font)) < min(self.max_font_size, self.max_font_proportion * min(image_height, image_width)):
37
+ # iterate until the text size is just larger than the criteria
38
+ fontsize += 1
39
+ font = ImageFont.truetype(font_path, fontsize)
40
+ # font = ImageFont.load_default(size=fontsize)
41
+ self.font_dict[key] = font
42
+
43
+ # setup the markSize dict
44
+ markSize_3digits = self.__get_markSize('555', image_height, image_width, font)
45
+ markSize_2digits = self.__get_markSize('55', image_height, image_width, font)
46
+ markSize_1digit = self.__get_markSize('5', image_height, image_width, font)
47
+ self.markSize_dict[key] = {
48
+ 1: markSize_1digit,
49
+ 2: markSize_2digits,
50
+ 3: markSize_3digits
51
+ }
52
+
53
+ def get_font(self, image_height, image_width):
54
+ key = f"{image_height}_{image_width}"
55
+ if key not in self.font_dict:
56
+ self._setup_new_font(image_height, image_width)
57
+ return self.font_dict[key]
58
+
59
+ def get_mark_size(self, text_str, image_height, image_width):
60
+ """Get the font size for the given image dimensions."""
61
+ key = f"{image_height}_{image_width}"
62
+ if key not in self.markSize_dict:
63
+ self._setup_new_font(image_height, image_width)
64
+
65
+ largest_size = self.markSize_dict[key].get(3, None)
66
+ text_h, text_w = self.markSize_dict[key].get(len(text_str), largest_size) # default to the largest size if the text is too long
67
+ return text_h, text_w
68
+
69
+ def __calculate_iou(box1, box2, return_area=False):
70
+ """
71
+ Calculate the Intersection over Union (IoU) of two bounding boxes.
72
+ :param box1: Tuple of (y, x, h, w) for the first bounding box
73
+ :param box2: Tuple of (y, x, h, w) for the second bounding box
74
+ :return: IoU value
75
+ """
76
+ y1, x1, h1, w1 = box1
77
+ y2, x2, h2, w2 = box2
78
+
79
+ # Calculate the intersection area
80
+ y_min = max(y1, y2)
81
+ x_min = max(x1, x2)
82
+ y_max = min(y1 + h1, y2 + h2)
83
+ x_max = min(x1 + w1, x2 + w2)
84
+
85
+ intersection_area = max(0, y_max - y_min) * max(0, x_max - x_min)
86
+
87
+ # Compute the area of both bounding boxes
88
+ box1_area = h1 * w1
89
+ box2_area = h2 * w2
90
+
91
+ # Calculate the IoU
92
+ # iou = intersection_area / box1_area + box2_area - intersection_area
93
+ iou = intersection_area / (min(box1_area, box2_area) + 0.0001)
94
+
95
+ if return_area:
96
+ return iou, intersection_area
97
+ return iou
98
+
99
+ def __calculate_nearest_corner_distance(box1, box2):
100
+ """Calculate the distance between the nearest edge or corner of two bounding boxes."""
101
+ y1, x1, h1, w1 = box1
102
+ y2, x2, h2, w2 = box2
103
+ corners1 = np.array([
104
+ [y1, x1],
105
+ [y1, x1 + w1],
106
+ [y1 + h1, x1],
107
+ [y1 + h1, x1 + w1]
108
+ ])
109
+ corners2 = np.array([
110
+ [y2, x2],
111
+ [y2, x2 + w2],
112
+ [y2 + h2, x2],
113
+ [y2 + h2, x2 + w2]
114
+ ])
115
+ # Calculate pairwise distances between corners
116
+ distances = np.linalg.norm(corners1[:, np.newaxis] - corners2, axis=2)
117
+
118
+ # Find the minimum distance
119
+ min_distance = np.min(distances)
120
+ return min_distance
121
+
122
+ def _find_least_overlapping_corner(bbox, bboxes, drawn_boxes, text_size, image_size):
123
+ """Find the corner with the least overlap with other bboxes.
124
+ Args:
125
+ bbox: (y, x, h, w) The bounding box to place the text on.
126
+ bboxes: [(y, x, h, w)] The list of bounding boxes to compare against.
127
+ drawn_boxes: [(y, x, h, w)] The list of bounding boxes that have already been drawn on.
128
+ text_size: (height, width) The size of the text to be drawn.
129
+ image_size: (height, width) The size of the image.
130
+ """
131
+ y, x, h, w = bbox
132
+ h_text, w_text = text_size
133
+ image_height, image_width = image_size
134
+ corners = [
135
+ # top-left
136
+ (y - h_text, x),
137
+ # top-right
138
+ (y - h_text, x + w - w_text),
139
+ # right-top
140
+ (y, x + w),
141
+ # right-bottom
142
+ (y + h - h_text, x + w),
143
+ # bottom-right
144
+ (y + h, x + w - w_text),
145
+ # bottom-left
146
+ (y + h, x),
147
+ # left-bottom
148
+ (y + h - h_text, x - w_text),
149
+ # left-top
150
+ (y, x - w_text),
151
+ ]
152
+ best_corner = corners[0]
153
+ max_flag = float('inf')
154
+
155
+ for corner in corners:
156
+ corner_bbox = (corner[0], corner[1], h_text, w_text)
157
+ # if the corner is out of the image, skip
158
+ if corner[0] < 0 or corner[1] < 0 or corner[0] + h_text > image_height or corner[1] + w_text > image_width:
159
+ continue
160
+ max_iou = - (image_width + image_height)
161
+ # for this corner, find the worst case (largest overlap)
162
+ # given the current corner, find the largest IoU with the other bboxes.
163
+ for other_bbox in bboxes + drawn_boxes:
164
+ if np.array_equal(bbox, other_bbox):
165
+ continue
166
+ iou = __calculate_iou(corner_bbox, other_bbox, return_area=True)[1]
167
+ max_iou = max(max_iou, iou - 0.0001 * __calculate_nearest_corner_distance(corner_bbox, other_bbox))
168
+ # the smaller the max_IOU, the better the corner
169
+ # keep the corner whose worst-case overlap is the smallest
170
+ if max_iou < max_flag:
171
+ max_flag = max_iou
172
+ best_corner = corner
173
+
174
+ return best_corner
175
+
176
+ def plot_boxes_with_marks(
177
+ image: Image.Image,
178
+ bboxes, # (y, x, h, w)
179
+ mark_helper: MarkHelper,
180
+ linewidth=2,
181
+ alpha=0,
182
+ edgecolor=None,
183
+ fn_save=None,
184
+ normalized_to_pixel=True,
185
+ add_mark=True
186
+ ) -> np.ndarray:
187
+ """Plots bounding boxes on an image with marks attached to the edges of the boxes where no overlap with other boxes occurs.
188
+ Args:
189
+ image: The image to plot the bounding boxes on.
190
+ bboxes: A 2D int array of shape (num_boxes, 4), where each row represents a bounding box: (y_top_left, x_top_left, box_height, box_width). If normalized_to_pixel is True, the values are float and are normalized with the image size. If normalized_to_pixel is False, the values are int and are in pixel.
191
+ """
192
+ # Then modify the drawing code
193
+ draw = ImageDraw.Draw(image)
194
+
195
+ # draw boxes on the image
196
+ image_width, image_height = image.size
197
+
198
+ if normalized_to_pixel:
199
+ bboxes = [(int(y * image_height), int(x * image_width), int(h * image_height), int(w * image_width)) for y, x, h, w in bboxes]
200
+
201
+ for box in bboxes:
202
+ y, x, h, w = box
203
+ draw.rectangle([x, y, x + w, y + h], outline=edgecolor, width=linewidth)
204
+
205
+ # Draw the bounding boxes with index at the least overlapping corner
206
+ drawn_boxes = []
207
+ for idx, bbox in enumerate(bboxes):
208
+ text = str(idx)
209
+ text_h, text_w = mark_helper.get_mark_size(text, image_height, image_width)
210
+ corner_y, corner_x = _find_least_overlapping_corner(
211
+ bbox, bboxes, drawn_boxes, (text_h, text_w), (image_height, image_width))
212
+
213
+ # Define the index box (y, x, y + h, x + w)
214
+ text_box = (corner_y, corner_x, text_h, text_w)
215
+
216
+ if add_mark:
217
+ # Draw the filled index box and text
218
+ draw.rectangle([corner_x, corner_y, corner_x + text_w, corner_y + text_h], # (x, y, x + w, y + h)
219
+ fill="red")
220
+ font = mark_helper.get_font(image_height, image_width)
221
+ draw.text((corner_x, corner_y), text, fill='white', font=font)
222
+
223
+ # Update the list of drawn boxes
224
+ drawn_boxes.append(np.array(text_box))
225
+
226
+ if fn_save is not None: # PIL image
227
+ image.save(fn_save)
228
+ return image
229
+
230
+ def plot_circles_with_marks(
231
+ image: Image.Image,
232
+ points, # (x, y)
233
+ mark_helper: MarkHelper,
234
+ linewidth=2,
235
+ edgecolor=None,
236
+ fn_save=None,
237
+ normalized_to_pixel=True,
238
+ add_mark=True
239
+ ) -> np.ndarray:
240
+ """Plots bounding boxes on an image with marks attached to the edges of the boxes where no overlap with other boxes occurs.
241
+ Args:
242
+ image: The image to plot the bounding boxes on.
243
+ bboxes: A 2D int array of shape (num_boxes, 4), where each row represents a bounding box: (y_top_left, x_top_left, box_height, box_width). If normalized_to_pixel is True, the values are float and are normalized with the image size. If normalized_to_pixel is False, the values are int and are in pixel.
244
+ """
245
+ # draw boxes on the image
246
+ image_width, image_height = image.size
247
+
248
+ if normalized_to_pixel:
249
+ bboxes = [(int(y * image_height), int(x * image_width), int(h * image_height), int(w * image_width)) for y, x, h, w in bboxes]
250
+
251
+ draw = ImageDraw.Draw(image)
252
+ for point in points:
253
+ x, y = point
254
+ draw.circle((x, y), radius=5, outline=edgecolor, width=linewidth)
255
+
256
+ if fn_save is not None: # PIL image
257
+ image.save(fn_save)
258
+ return image
259
+
260
+ markhelper = MarkHelper()
261
+
262
+ BBOX_DEDUPLICATION_IOU_PROPORTION = 0.5
263
+ BBOX_GROUPING_VERTICAL_THRESHOLD = 20
264
+ BBOX_GROUPING_HORIZONTAL_THRESHOLD = 20
265
+ BBOX_AUG_TARGET = 2.0
266
+
267
+ def _is_boxes_same_line_or_near(bbox1, bbox2, vertical_threshold, horizontal_threshold):
268
+ """check if two boxes are in the same line or close enough to be considered together"""
269
+ y1, x1, h1, w1 = bbox1
270
+ y2, x2, h2, w2 = bbox2
271
+
272
+ # Check if the boxes are close horizontally (consider the edge case where the boxes are touching)
273
+ horizontally_close = (x1 <= x2 and x2 - x1 <= w1 + horizontal_threshold) or (x2 <= x1 and x1 - x2 <= w2 + horizontal_threshold)
274
+
275
+ # Check if the boxes are close vertically (consider the edge case where the boxes are touching)
276
+ vertically_close = (y1 <= y2 and y2 - y1 <= h1 + vertical_threshold) or (y2 <= y1 and y1 - y2 <= h2 + vertical_threshold)
277
+
278
+ # Consider the boxes to be in the same line if they are vertically close and either overlap or are close horizontally
279
+ return vertically_close and horizontally_close
280
+
281
+ def _build_adjacency_matrix(bboxes, vertical_threshold, horizontal_threshold):
282
+ """Build the adjacency matrix based on the merging criteria."""
283
+ num_boxes = len(bboxes)
284
+ A = np.zeros((num_boxes, num_boxes), dtype=int)
285
+
286
+ for i in range(num_boxes):
287
+ for j in range(i + 1, num_boxes):
288
+ if _is_boxes_same_line_or_near(bboxes[i], bboxes[j], vertical_threshold, horizontal_threshold):
289
+ A[i, j] = 1
290
+ A[j, i] = 1 # Symmetric matrix
291
+
292
+ return A
293
+
294
+ def merge_connected_bboxes(bboxes, text_details,
295
+ vertical_threshold=BBOX_GROUPING_VERTICAL_THRESHOLD,
296
+ horizontal_threshold=BBOX_GROUPING_HORIZONTAL_THRESHOLD
297
+ ):
298
+ """Merge bboxes based on the adjacency matrix and return merged bboxes.
299
+ Args:
300
+ bboxes: A 2D array of shape (num_boxes, 4), where each row represents a bounding box: (y, x, height, width).
301
+ text_details: A list of text details for each bounding box.
302
+ vertical_threshold: The maximum vertical distance between two boxes to be considered in the same line.
303
+ horizontal_threshold: The maximum horizontal distance between two boxes to be considered close.
304
+ """
305
+ # return if there are no bboxes
306
+ if len(bboxes) <= 1:
307
+ return bboxes, text_details
308
+
309
+ # Convert bboxes (x1, y1, x2, y2) to (y, x, height, width) format
310
+ bboxes = np.array(bboxes)
311
+ bboxes = np.array([bboxes[:, 1], bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1], bboxes[:, 2] - bboxes[:, 0]]).T
312
+
313
+ # Build adjacency matrix
314
+ A = _build_adjacency_matrix(bboxes, vertical_threshold, horizontal_threshold)
315
+
316
+ # Create graph from adjacency matrix
317
+ G = nx.from_numpy_array(A)
318
+
319
+ # Find connected components
320
+ components = list(nx.connected_components(G))
321
+
322
+ # Convert bboxes to (y_min, x_min, y_max, x_max) format
323
+ corners = np.copy(bboxes)
324
+ corners_y, corners_x, corners_h, corners_w = corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]
325
+
326
+ corners_y_max = corners_y + corners_h
327
+ corners_x_max = corners_x + corners_w
328
+
329
+ # Merge bboxes for each connected component
330
+ merged_bboxes = []
331
+ merged_text_details = []
332
+ for component in components:
333
+ indices = list(component) # e.g., [32, 33, 34, 30, 31]
334
+ indices = sorted(indices)
335
+
336
+ # merge the text details
337
+ merged_text_details.append(' '.join([text_details[i] for i in indices]))
338
+
339
+ # merge the bboxes
340
+ y_min = min(corners_y[i] for i in indices)
341
+ x_min = min(corners_x[i] for i in indices)
342
+ y_max = max(corners_y_max[i] for i in indices)
343
+ x_max = max(corners_x_max[i] for i in indices)
344
+ merged_bboxes.append((y_min, x_min, y_max - y_min, x_max - x_min)) # Convert merged_bbox back to (y, x, height, width) format
345
+
346
+ # convert (y, x, height, width) to (x1, y1, x2, y2) format without np.array
347
+ merged_bboxes = [(bbox[1], bbox[0], bbox[1] + bbox[3], bbox[0] + bbox[2]) for bbox in merged_bboxes]
348
+ return merged_bboxes, merged_text_details
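
A minimal sketch of `merge_connected_bboxes` on three hypothetical OCR boxes in `(x1, y1, x2, y2)` pixel format: the first two fall within the default 20-pixel grouping thresholds, so they are merged and their texts concatenated, while the third stays separate:

```python
from util.som import merge_connected_bboxes

bboxes = [(10, 10, 60, 30), (65, 10, 120, 30), (10, 300, 60, 320)]
texts  = ["Sign", "in", "Cancel"]

merged_boxes, merged_texts = merge_connected_bboxes(bboxes, texts)
print(merged_boxes)   # merged 'Sign in' box spans (10, 10, 120, 30); 'Cancel' is unchanged
print(merged_texts)   # -> ['Sign in', 'Cancel']
```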
util/utils.py ADDED
@@ -0,0 +1,542 @@
1
+ # from ultralytics import YOLO
2
+ import os
3
+ import io
4
+ import base64
5
+ import time
6
+ from PIL import Image, ImageDraw, ImageFont
7
+ import json
8
+ import requests
9
+ # utility function
10
+ import os
11
+
12
+ import json
13
+ import sys
14
+ import os
15
+ import cv2
16
+ import numpy as np
17
+ # %matplotlib inline
18
+ from matplotlib import pyplot as plt
19
+ import easyocr
20
+ from paddleocr import PaddleOCR
21
+ reader = easyocr.Reader(['en'])
22
+ paddle_ocr = PaddleOCR(
23
+ lang='en', # other lang also available
24
+ use_angle_cls=False,
25
+ use_gpu=False, # using cuda will conflict with pytorch in the same process
26
+ show_log=False,
27
+ max_batch_size=1024,
28
+ use_dilation=True, # improves accuracy
29
+ det_db_score_mode='slow', # improves accuracy
30
+ rec_batch_num=1024)
31
+ import time
32
+ import base64
33
+
34
+ import os
35
+ import ast
36
+ import torch
37
+ from typing import Tuple, List, Union
38
+ from torchvision.ops import box_convert
39
+ import re
40
+ from torchvision.transforms import ToPILImage
41
+ import supervision as sv
42
+ import torchvision.transforms as T
43
+ from util.box_annotator import BoxAnnotator
44
+
45
+
46
+ def get_caption_model_processor(model_name, model_name_or_path="Salesforce/blip2-opt-2.7b", device=None):
47
+ if not device:
48
+ device = "cuda" if torch.cuda.is_available() else "cpu"
49
+ if model_name == "blip2":
50
+ from transformers import Blip2Processor, Blip2ForConditionalGeneration
51
+ processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
52
+ if device == 'cpu':
53
+ model = Blip2ForConditionalGeneration.from_pretrained(
54
+ model_name_or_path, device_map=None, torch_dtype=torch.float32
55
+ )
56
+ else:
57
+ model = Blip2ForConditionalGeneration.from_pretrained(
58
+ model_name_or_path, device_map=None, torch_dtype=torch.float16
59
+ ).to(device)
60
+ elif model_name == "florence2":
61
+ from transformers import AutoProcessor, AutoModelForCausalLM
62
+ processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
63
+ if device == 'cpu':
64
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float32, trust_remote_code=True)
65
+ else:
66
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, trust_remote_code=True).to(device)
67
+ return {'model': model.to(device), 'processor': processor}
68
+
69
+
70
+ def get_yolo_model(model_path):
71
+ from ultralytics import YOLO
72
+ # Load the model.
73
+ model = YOLO(model_path)
74
+ return model
75
+
76
+
77
+ @torch.inference_mode()
78
+ def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=None, batch_size=None):
79
+ # Number of samples per batch, --> 256 roughly takes 23 GB of GPU memory for florence model
80
+ to_pil = ToPILImage()
81
+ if starting_idx:
82
+ non_ocr_boxes = filtered_boxes[starting_idx:]
83
+ else:
84
+ non_ocr_boxes = filtered_boxes
85
+ croped_pil_image = []
86
+ for i, coord in enumerate(non_ocr_boxes):
87
+ try:
88
+ xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
89
+ ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
90
+ cropped_image = image_source[ymin:ymax, xmin:xmax, :]
91
+ cropped_image = cv2.resize(cropped_image, (64, 64))
92
+ croped_pil_image.append(to_pil(cropped_image))
93
+ except:
94
+ continue
95
+
96
+ model, processor = caption_model_processor['model'], caption_model_processor['processor']
97
+ if not prompt:
98
+ if 'florence' in model.config.name_or_path:
99
+ prompt = "<CAPTION>"
100
+ else:
101
+ prompt = "The image shows"
102
+
103
+ generated_texts = []
104
+ device = model.device
105
+ # batch_size = 64
106
+ for i in range(0, len(croped_pil_image), batch_size):
107
+ start = time.time()
108
+ batch = croped_pil_image[i:i+batch_size]
109
+ t1 = time.time()
110
+ if model.device.type == 'cuda':
111
+ inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt", do_resize=False).to(device=device, dtype=torch.float16)
112
+ else:
113
+ inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt").to(device=device)
114
+ # if 'florence' in model.config.name_or_path:
115
+ generated_ids = model.generate(input_ids=inputs["input_ids"],pixel_values=inputs["pixel_values"],max_new_tokens=20,num_beams=1, do_sample=False)
116
+ # else:
117
+ # generated_ids = model.generate(**inputs, max_length=100, num_beams=5, no_repeat_ngram_size=2, early_stopping=True, num_return_sequences=1) # temperature=0.01, do_sample=True,
118
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
119
+ generated_text = [gen.strip() for gen in generated_text]
120
+ generated_texts.extend(generated_text)
121
+
122
+ return generated_texts
123
+
124
+
125
+
126
+ def get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor):
127
+ to_pil = ToPILImage()
128
+ if ocr_bbox:
129
+ non_ocr_boxes = filtered_boxes[len(ocr_bbox):]
130
+ else:
131
+ non_ocr_boxes = filtered_boxes
132
+ croped_pil_image = []
133
+ for i, coord in enumerate(non_ocr_boxes):
134
+ xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
135
+ ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
136
+ cropped_image = image_source[ymin:ymax, xmin:xmax, :]
137
+ croped_pil_image.append(to_pil(cropped_image))
138
+
139
+ model, processor = caption_model_processor['model'], caption_model_processor['processor']
140
+ device = model.device
141
+ messages = [{"role": "user", "content": "<|image_1|>\ndescribe the icon in one sentence"}]
142
+ prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
143
+
144
+ batch_size = 5 # Number of samples per batch
145
+ generated_texts = []
146
+
147
+ for i in range(0, len(croped_pil_image), batch_size):
148
+ images = croped_pil_image[i:i+batch_size]
149
+ image_inputs = [processor.image_processor(x, return_tensors="pt") for x in images]
150
+ inputs ={'input_ids': [], 'attention_mask': [], 'pixel_values': [], 'image_sizes': []}
151
+ texts = [prompt] * len(images)
152
+ for i, txt in enumerate(texts):
153
+ input = processor._convert_images_texts_to_inputs(image_inputs[i], txt, return_tensors="pt")
154
+ inputs['input_ids'].append(input['input_ids'])
155
+ inputs['attention_mask'].append(input['attention_mask'])
156
+ inputs['pixel_values'].append(input['pixel_values'])
157
+ inputs['image_sizes'].append(input['image_sizes'])
158
+ max_len = max([x.shape[1] for x in inputs['input_ids']])
159
+ for i, v in enumerate(inputs['input_ids']):
160
+ inputs['input_ids'][i] = torch.cat([processor.tokenizer.pad_token_id * torch.ones(1, max_len - v.shape[1], dtype=torch.long), v], dim=1)
161
+ inputs['attention_mask'][i] = torch.cat([torch.zeros(1, max_len - v.shape[1], dtype=torch.long), inputs['attention_mask'][i]], dim=1)
162
+ inputs_cat = {k: torch.concatenate(v).to(device) for k, v in inputs.items()}
163
+
164
+ generation_args = {
165
+ "max_new_tokens": 25,
166
+ "temperature": 0.01,
167
+ "do_sample": False,
168
+ }
169
+ generate_ids = model.generate(**inputs_cat, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
170
+ # # remove input tokens
171
+ generate_ids = generate_ids[:, inputs_cat['input_ids'].shape[1]:]
172
+ response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
173
+ response = [res.strip('\n').strip() for res in response]
174
+ generated_texts.extend(response)
175
+
176
+ return generated_texts
177
+
178
+ def remove_overlap(boxes, iou_threshold, ocr_bbox=None):
179
+ assert ocr_bbox is None or isinstance(ocr_bbox, List)
180
+
181
+ def box_area(box):
182
+ return (box[2] - box[0]) * (box[3] - box[1])
183
+
184
+ def intersection_area(box1, box2):
185
+ x1 = max(box1[0], box2[0])
186
+ y1 = max(box1[1], box2[1])
187
+ x2 = min(box1[2], box2[2])
188
+ y2 = min(box1[3], box2[3])
189
+ return max(0, x2 - x1) * max(0, y2 - y1)
190
+
191
+ def IoU(box1, box2):
192
+ intersection = intersection_area(box1, box2)
193
+ union = box_area(box1) + box_area(box2) - intersection + 1e-6
194
+ if box_area(box1) > 0 and box_area(box2) > 0:
195
+ ratio1 = intersection / box_area(box1)
196
+ ratio2 = intersection / box_area(box2)
197
+ else:
198
+ ratio1, ratio2 = 0, 0
199
+ return max(intersection / union, ratio1, ratio2)
200
+
201
+ def is_inside(box1, box2):
202
+ # return box1[0] >= box2[0] and box1[1] >= box2[1] and box1[2] <= box2[2] and box1[3] <= box2[3]
203
+ intersection = intersection_area(box1, box2)
204
+ ratio1 = intersection / box_area(box1)
205
+ return ratio1 > 0.95
206
+
207
+ boxes = boxes.tolist()
208
+ filtered_boxes = []
209
+ if ocr_bbox:
210
+ filtered_boxes.extend(ocr_bbox)
211
+ # print('ocr_bbox!!!', ocr_bbox)
212
+ for i, box1 in enumerate(boxes):
213
+ # if not any(IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2) for j, box2 in enumerate(boxes) if i != j):
214
+ is_valid_box = True
215
+ for j, box2 in enumerate(boxes):
216
+ # keep the smaller box
217
+ if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
218
+ is_valid_box = False
219
+ break
220
+ if is_valid_box:
221
+ # add the following 2 lines to include ocr bbox
222
+ if ocr_bbox:
223
+ # only add the box if it does not overlap with any ocr bbox
224
+ if not any(IoU(box1, box3) > iou_threshold and not is_inside(box1, box3) for k, box3 in enumerate(ocr_bbox)):
225
+ filtered_boxes.append(box1)
226
+ else:
227
+ filtered_boxes.append(box1)
228
+ return torch.tensor(filtered_boxes)
229
+
230
+
231
+def remove_overlap_new(boxes, iou_threshold, ocr_bbox=None):
+    '''
+    ocr_bbox format: [{'type': 'text', 'bbox': [x1, y1, x2, y2], 'interactivity': False, 'content': str}, ...]
+    boxes format:    [{'type': 'icon', 'bbox': [x1, y1, x2, y2], 'interactivity': True, 'content': None}, ...]
+    '''
+    assert ocr_bbox is None or isinstance(ocr_bbox, List)
+
+    def box_area(box):
+        return (box[2] - box[0]) * (box[3] - box[1])
+
+    def intersection_area(box1, box2):
+        x1 = max(box1[0], box2[0])
+        y1 = max(box1[1], box2[1])
+        x2 = min(box1[2], box2[2])
+        y2 = min(box1[3], box2[3])
+        return max(0, x2 - x1) * max(0, y2 - y1)
+
+    def IoU(box1, box2):
+        intersection = intersection_area(box1, box2)
+        union = box_area(box1) + box_area(box2) - intersection + 1e-6
+        if box_area(box1) > 0 and box_area(box2) > 0:
+            ratio1 = intersection / box_area(box1)
+            ratio2 = intersection / box_area(box2)
+        else:
+            ratio1, ratio2 = 0, 0
+        return max(intersection / union, ratio1, ratio2)
+
+    def is_inside(box1, box2):
+        # return box1[0] >= box2[0] and box1[1] >= box2[1] and box1[2] <= box2[2] and box1[3] <= box2[3]
+        intersection = intersection_area(box1, box2)
+        ratio1 = intersection / box_area(box1)
+        return ratio1 > 0.80
+
+    # boxes = boxes.tolist()
+    filtered_boxes = []
+    if ocr_bbox:
+        filtered_boxes.extend(ocr_bbox)
+    # print('ocr_bbox!!!', ocr_bbox)
+    for i, box1_elem in enumerate(boxes):
+        box1 = box1_elem['bbox']
+        is_valid_box = True
+        for j, box2_elem in enumerate(boxes):
+            # keep the smaller box
+            box2 = box2_elem['bbox']
+            if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
+                is_valid_box = False
+                break
+        if is_valid_box:
+            if ocr_bbox:
+                # keep yolo boxes + prioritize ocr label
+                box_added = False
+                ocr_labels = ''
+                for box3_elem in ocr_bbox:
+                    if not box_added:
+                        box3 = box3_elem['bbox']
+                        if is_inside(box3, box1):  # ocr inside icon: fold its label into the icon
+                            # box_added = True
+                            # delete the box3_elem from ocr_bbox
+                            try:
+                                # gather all ocr labels
+                                ocr_labels += box3_elem['content'] + ' '
+                                filtered_boxes.remove(box3_elem)
+                            except:
+                                # the text element may already have been merged into another icon
+                                continue
+                            # break
+                        elif is_inside(box1, box3):
+                            # icon inside ocr: don't add this icon box; no need to check other ocr
+                            # boxes, since ocr boxes don't overlap and an icon can only be inside one
+                            box_added = True
+                            break
+                        else:
+                            continue
+                if not box_added:
+                    if ocr_labels:
+                        filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': ocr_labels})
+                    else:
+                        filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None})
+            else:
+                # no ocr boxes provided: keep the raw bbox
+                filtered_boxes.append(box1)
+    return filtered_boxes  # torch.tensor(filtered_boxes)
+
+
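
A minimal usage sketch for remove_overlap_new, using the element-dict format described in its docstring (the coordinates are arbitrary normalized values picked for illustration):

from util.utils import remove_overlap_new

icon_boxes = [
    {'type': 'icon', 'bbox': [0.10, 0.10, 0.30, 0.20], 'interactivity': True, 'content': None},  # larger near-duplicate, dropped
    {'type': 'icon', 'bbox': [0.11, 0.11, 0.29, 0.19], 'interactivity': True, 'content': None},  # tighter box, kept
]
ocr_boxes = [
    {'type': 'text', 'bbox': [0.12, 0.12, 0.28, 0.18], 'interactivity': False, 'content': 'Submit'},
]

merged = remove_overlap_new(icon_boxes, iou_threshold=0.7, ocr_bbox=ocr_boxes)
# The larger duplicate icon is discarded ("keep the smaller box"), and the OCR text that
# lies inside the surviving icon becomes that icon's content, so the standalone text
# element disappears from the output.
for elem in merged:
    print(elem['type'], elem['content'])  # -> icon Submit
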
+def load_image(image_path: str) -> Tuple[np.ndarray, torch.Tensor]:
+    transform = T.Compose(
+        [
+            T.RandomResize([800], max_size=1333),
+            T.ToTensor(),
+            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        ]
+    )
+    image_source = Image.open(image_path).convert("RGB")
+    image = np.asarray(image_source)
+    image_transformed, _ = transform(image_source, None)
+    return image, image_transformed
+
+
+def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str], text_scale: float,
+             text_padding=5, text_thickness=2, thickness=3) -> Tuple[np.ndarray, dict]:
+    """
+    This function annotates an image with bounding boxes and labels.
+
+    Parameters:
+    image_source (np.ndarray): The source image to be annotated.
+    boxes (torch.Tensor): A tensor of bounding boxes in normalized cxcywh format (ratios of image size).
+    logits (torch.Tensor): A tensor containing confidence scores for each bounding box.
+    phrases (List[str]): A list of labels for each bounding box.
+    text_scale (float): The scale of the text to be displayed. 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web.
+
+    Returns:
+    Tuple[np.ndarray, dict]: The annotated image and a dict mapping each label to its xywh box.
+    """
+    h, w, _ = image_source.shape
+    boxes = boxes * torch.Tensor([w, h, w, h])
+    xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
+    xywh = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xywh").numpy()
+    detections = sv.Detections(xyxy=xyxy)
+
+    # boxes are labeled by index; the caller passes matching index strings in `phrases`
+    labels = [str(i) for i in range(boxes.shape[0])]
+
+    box_annotator = BoxAnnotator(text_scale=text_scale, text_padding=text_padding, text_thickness=text_thickness, thickness=thickness)  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
+    annotated_frame = image_source.copy()
+    annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels, image_size=(w, h))
+
+    label_coordinates = {f"{phrase}": v for phrase, v in zip(phrases, xywh)}
+    return annotated_frame, label_coordinates
+
+
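
A rough sketch of calling annotate directly. It expects normalized cxcywh boxes and relies on this module's top-level imports (torchvision's box_convert, supervision as sv, and the repo's BoxAnnotator); the image path here is a placeholder:

import numpy as np
import torch
from PIL import Image
from torchvision.ops import box_convert
from util.utils import annotate

image = np.asarray(Image.open("screenshot.png").convert("RGB"))  # placeholder path
h, w, _ = image.shape

# Pixel-space xyxy detections -> normalized cxcywh, as annotate expects.
xyxy_px = torch.tensor([[40.0, 60.0, 200.0, 120.0]])
cxcywh = box_convert(xyxy_px, in_fmt="xyxy", out_fmt="cxcywh") / torch.tensor([w, h, w, h])

frame, label_coords = annotate(
    image_source=image,
    boxes=cxcywh,
    logits=torch.tensor([0.9]),
    phrases=["0"],
    text_scale=0.8,  # 0.8 for mobile/web, 0.3 for desktop per the docstring
)
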
+def predict(model, image, caption, box_threshold, text_threshold):
+    """Run a huggingface grounded object-detection model in place of the original model."""
+    model, processor = model['model'], model['processor']
+    device = model.device
+
+    inputs = processor(images=image, text=caption, return_tensors="pt").to(device)
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    results = processor.post_process_grounded_object_detection(
+        outputs,
+        inputs.input_ids,
+        box_threshold=box_threshold,  # e.g. 0.4
+        text_threshold=text_threshold,  # e.g. 0.3
+        target_sizes=[image.size[::-1]]
+    )[0]
+    boxes, logits, phrases = results["boxes"], results["scores"], results["labels"]
+    return boxes, logits, phrases
+
+
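
predict expects a dict holding a grounded-detection model and its processor. A sketch of wiring it up with a Grounding DINO checkpoint from the Hub (the checkpoint name, image path, prompt, and thresholds are illustrative placeholders, not values pinned by this repo):

from PIL import Image
from transformers import AutoModelForZeroShotObjectDetection, AutoProcessor
from util.utils import predict

ckpt = "IDEA-Research/grounding-dino-tiny"  # illustrative checkpoint
grounding = {
    "model": AutoModelForZeroShotObjectDetection.from_pretrained(ckpt),  # stays on CPU unless moved
    "processor": AutoProcessor.from_pretrained(ckpt),
}

image = Image.open("screenshot.png").convert("RGB")  # placeholder path
boxes, scores, labels = predict(grounding, image, caption="icon. button.",
                                box_threshold=0.4, text_threshold=0.3)
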
+def predict_yolo(model, image, box_threshold, imgsz, scale_img, iou_threshold=0.7):
+    """Run an ultralytics YOLO model and return boxes (pixel xyxy), confidences, and index labels."""
+    # model = model['model']
+    if scale_img:
+        result = model.predict(
+            source=image,
+            conf=box_threshold,
+            imgsz=imgsz,
+            iou=iou_threshold,  # default 0.7
+        )
+    else:
+        result = model.predict(
+            source=image,
+            conf=box_threshold,
+            iou=iou_threshold,  # default 0.7
+        )
+    boxes = result[0].boxes.xyxy  # in pixel space; call .tolist() if a plain list is needed
+    conf = result[0].boxes.conf
+    phrases = [str(i) for i in range(len(boxes))]
+
+    return boxes, conf, phrases
+
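
A usage sketch for predict_yolo; it wraps any ultralytics-style model whose predict() returns Results with .boxes.xyxy and .boxes.conf. The weights path is a placeholder for the icon-detector weights this Space downloads into weights/:

from PIL import Image
from ultralytics import YOLO
from util.utils import predict_yolo

yolo_model = YOLO("weights/icon_detect/model.pt")  # placeholder path
image = Image.open("screenshot.png").convert("RGB")  # placeholder path

boxes, conf, phrases = predict_yolo(
    yolo_model, image,
    box_threshold=0.05,
    imgsz=(image.height, image.width),
    scale_img=True,
    iou_threshold=0.7,
)
print(boxes.shape, conf.shape, phrases[:5])
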
+def int_box_area(box, w, h):
+    x1, y1, x2, y2 = box
+    int_box = [int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)]
+    area = (int_box[2] - int_box[0]) * (int_box[3] - int_box[1])
+    return area
+
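
int_box_area converts a normalized box to its pixel-space area so degenerate detections can be filtered out; for example:

from util.utils import int_box_area

print(int_box_area([0.1, 0.1, 0.2, 0.3], w=1000, h=500))  # (200 - 100) * (150 - 50) = 10000
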
+def get_som_labeled_img(image_source: Union[str, Image.Image], model=None, BOX_TRESHOLD=0.01, output_coord_in_ratio=False, ocr_bbox=None, text_scale=0.4, text_padding=5, draw_bbox_config=None, caption_model_processor=None, ocr_text=[], use_local_semantics=True, iou_threshold=0.9, prompt=None, scale_img=False, imgsz=None, batch_size=64):
+    """Process either an image path or Image object
+
+    Args:
+        image_source: Either a file path (str) or PIL Image object
+        ...
+    """
+    if isinstance(image_source, str):
+        image_source = Image.open(image_source).convert("RGB")
+
+    w, h = image_source.size
+    if not imgsz:
+        imgsz = (h, w)
+    # print('image size:', w, h)
+    xyxy, logits, phrases = predict_yolo(model=model, image=image_source, box_threshold=BOX_TRESHOLD, imgsz=imgsz, scale_img=scale_img, iou_threshold=0.1)
+    xyxy = xyxy / torch.Tensor([w, h, w, h]).to(xyxy.device)
+    image_source = np.asarray(image_source)
+    phrases = [str(i) for i in range(len(phrases))]
+
+    # normalize ocr boxes to ratios of the image size
+    if ocr_bbox:
+        ocr_bbox = torch.tensor(ocr_bbox) / torch.Tensor([w, h, w, h])
+        ocr_bbox = ocr_bbox.tolist()
+    else:
+        print('no ocr bbox!!!')
+        ocr_bbox = None
+
+    ocr_bbox_elem = [{'type': 'text', 'bbox': box, 'interactivity': False, 'content': txt} for box, txt in zip(ocr_bbox, ocr_text) if int_box_area(box, w, h) > 0]
+    xyxy_elem = [{'type': 'icon', 'bbox': box, 'interactivity': True, 'content': None} for box in xyxy.tolist() if int_box_area(box, w, h) > 0]
+    filtered_boxes = remove_overlap_new(boxes=xyxy_elem, iou_threshold=iou_threshold, ocr_bbox=ocr_bbox_elem)
+
+    # sort filtered_boxes so that elements with 'content': None come last, and record the
+    # index of the first such element
+    filtered_boxes_elem = sorted(filtered_boxes, key=lambda x: x['content'] is None)
+    starting_idx = next((i for i, box in enumerate(filtered_boxes_elem) if box['content'] is None), -1)
+    filtered_boxes = torch.tensor([box['bbox'] for box in filtered_boxes_elem])
+    print('len(filtered_boxes):', len(filtered_boxes), starting_idx)
+
+    # get parsed icon local semantics
+    time1 = time.time()
+    if use_local_semantics:
+        caption_model = caption_model_processor['model']
+        if 'phi3_v' in caption_model.config.model_type:
+            parsed_content_icon = get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor)
+        else:
+            parsed_content_icon = get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=prompt, batch_size=batch_size)
+        ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
+        icon_start = len(ocr_text)
+        parsed_content_icon_ls = []
+        # fill the filtered_boxes_elem None content with parsed_content_icon in order
+        for i, box in enumerate(filtered_boxes_elem):
+            if box['content'] is None:
+                box['content'] = parsed_content_icon.pop(0)
+        for i, txt in enumerate(parsed_content_icon):
+            parsed_content_icon_ls.append(f"Icon Box ID {str(i + icon_start)}: {txt}")
+        parsed_content_merged = ocr_text + parsed_content_icon_ls
+    else:
+        ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
+        parsed_content_merged = ocr_text
+    print('time to get parsed content:', time.time() - time1)
+
+    filtered_boxes = box_convert(boxes=filtered_boxes, in_fmt="xyxy", out_fmt="cxcywh")
+
+    phrases = [i for i in range(len(filtered_boxes))]
+
+    # draw boxes
+    if draw_bbox_config:
+        annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, **draw_bbox_config)
+    else:
+        annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, text_scale=text_scale, text_padding=text_padding)
+
+    pil_img = Image.fromarray(annotated_frame)
+    buffered = io.BytesIO()
+    pil_img.save(buffered, format="PNG")
+    encoded_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+    if output_coord_in_ratio:
+        label_coordinates = {k: [v[0] / w, v[1] / h, v[2] / w, v[3] / h] for k, v in label_coordinates.items()}
+        assert w == annotated_frame.shape[1] and h == annotated_frame.shape[0]
+
+    return encoded_image, label_coordinates, filtered_boxes_elem
+
+
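
A rough end-to-end sketch combining check_ocr_box (defined below) with get_som_labeled_img, skipping the icon-caption model for brevity; the image and weights paths are placeholders, and the OCR/threshold arguments are illustrative:

from ultralytics import YOLO
from util.utils import check_ocr_box, get_som_labeled_img

img_path = "screenshot.png"  # placeholder path
yolo_model = YOLO("weights/icon_detect/model.pt")  # placeholder weights path

(ocr_text, ocr_bbox), _ = check_ocr_box(
    img_path,
    display_img=False,
    output_bb_format="xyxy",
    easyocr_args={"paragraph": False, "text_threshold": 0.9},
)
encoded_image, label_coordinates, parsed_elements = get_som_labeled_img(
    img_path,
    model=yolo_model,
    BOX_TRESHOLD=0.05,
    ocr_bbox=ocr_bbox,
    ocr_text=ocr_text,
    use_local_semantics=False,  # skip the icon-caption model in this sketch
    iou_threshold=0.7,
)
# encoded_image is a base64 PNG; parsed_elements is the merged list of text/icon dicts.
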
+def get_xywh(input):
+    x, y, w, h = input[0][0], input[0][1], input[2][0] - input[0][0], input[2][1] - input[0][1]
+    x, y, w, h = int(x), int(y), int(w), int(h)
+    return x, y, w, h
+
+def get_xyxy(input):
+    x, y, xp, yp = input[0][0], input[0][1], input[2][0], input[2][1]
+    x, y, xp, yp = int(x), int(y), int(xp), int(yp)
+    return x, y, xp, yp
+
+def get_xywh_yolo(input):
+    x, y, w, h = input[0], input[1], input[2] - input[0], input[3] - input[1]
+    x, y, w, h = int(x), int(y), int(w), int(h)
+    return x, y, w, h
+
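
The three helpers above convert box formats: get_xywh and get_xyxy take an OCR-style quadrilateral (four corner points, top-left first and bottom-right third, as EasyOCR returns them), while get_xywh_yolo takes a flat xyxy box. For example:

from util.utils import get_xywh, get_xyxy, get_xywh_yolo

quad = [[10, 20], [110, 20], [110, 70], [10, 70]]  # corners as returned by EasyOCR
print(get_xywh(quad))                     # (10, 20, 100, 50) -> x, y, width, height
print(get_xyxy(quad))                     # (10, 20, 110, 70) -> x1, y1, x2, y2
print(get_xywh_yolo([10, 20, 110, 70]))   # (10, 20, 100, 50) from a flat xyxy box
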
+def check_ocr_box(image_source: Union[str, Image.Image], display_img=True, output_bb_format='xywh', goal_filtering=None, easyocr_args=None, use_paddleocr=False):
+    if isinstance(image_source, str):
+        image_source = Image.open(image_source)
+    if image_source.mode == 'RGBA':
+        # Convert RGBA to RGB to avoid alpha channel issues
+        image_source = image_source.convert('RGB')
+    image_np = np.array(image_source)
+    w, h = image_source.size
+    if use_paddleocr:
+        if easyocr_args is None:
+            text_threshold = 0.5
+        else:
+            text_threshold = easyocr_args['text_threshold']
+        result = paddle_ocr.ocr(image_np, cls=False)[0]
+        coord = [item[0] for item in result if item[1][1] > text_threshold]
+        text = [item[1][0] for item in result if item[1][1] > text_threshold]
+    else:  # EasyOCR
+        if easyocr_args is None:
+            easyocr_args = {}
+        result = reader.readtext(image_np, **easyocr_args)
+        coord = [item[0] for item in result]
+        text = [item[1] for item in result]
+    if display_img:
+        opencv_img = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
+        bb = []
+        for item in coord:
+            x, y, a, b = get_xywh(item)
+            bb.append((x, y, a, b))
+            cv2.rectangle(opencv_img, (x, y), (x + a, y + b), (0, 255, 0), 2)
+        # matplotlib expects RGB
+        plt.imshow(cv2.cvtColor(opencv_img, cv2.COLOR_BGR2RGB))
+    else:
+        if output_bb_format == 'xywh':
+            bb = [get_xywh(item) for item in coord]
+        elif output_bb_format == 'xyxy':
+            bb = [get_xyxy(item) for item in coord]
+    return (text, bb), goal_filtering
+
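
check_ocr_box runs either PaddleOCR or EasyOCR depending on use_paddleocr; both backends (paddle_ocr, reader) are module-level objects initialized elsewhere in this file. A short sketch of the EasyOCR path, with a placeholder image path and illustrative arguments:

from util.utils import check_ocr_box

(texts, boxes), _ = check_ocr_box(
    "screenshot.png",  # placeholder path
    display_img=False,
    output_bb_format="xywh",
    easyocr_args={"paragraph": False, "text_threshold": 0.8},
    use_paddleocr=False,
)
for t, (x, y, w, h) in zip(texts, boxes):
    print(f"{t!r} at x={x}, y={y}, w={w}, h={h}")
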