dianecy committed (verified)
Commit 3ec4928 · 1 Parent(s): 3b5fc39

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete changeset.
Files changed (50)
  1. .gitattributes +1 -0
  2. .history/datasets/a2d_20250203174308.py +247 -0
  3. .history/datasets/ytvos_ref_20250113131134.py +241 -0
  4. .history/datasets/ytvos_ref_20250113131327.py +241 -0
  5. .history/datasets/ytvos_ref_20250113141118.py +241 -0
  6. .history/datasets/ytvos_ref_20250113162417.py +241 -0
  7. .history/datasets/ytvos_ref_20250113163313.py +248 -0
  8. .history/datasets/ytvos_ref_20250114201904.py +252 -0
  9. .history/datasets/ytvos_ref_20250114201908.py +253 -0
  10. .history/datasets/ytvos_ref_20250114202340.py +251 -0
  11. .history/datasets/ytvos_ref_20250114205314.py +250 -0
  12. .history/datasets/ytvos_ref_20250114211305.py +252 -0
  13. .history/datasets/ytvos_ref_20250116074326.py +239 -0
  14. .history/mbench/gpt_ref-ytvos-cy_20250121151513.py +433 -0
  15. .history/mbench/gpt_ref-ytvos-revised_20250121160858.py +428 -0
  16. .history/mbench/gpt_ref-ytvos_20250119070820.py +286 -0
  17. .history/mbench/gpt_ref-ytvos_numbered_cy_20250130183936.py +199 -0
  18. .history/mbench/gpt_ref-ytvos_numbered_cy_20250130190533.py +429 -0
  19. .history/mbench/gpt_ref-ytvos_numbered_cy_20250130190813.py +427 -0
  20. .history/mbench/gpt_ref-ytvos_numbered_cy_20250130220417.py +427 -0
  21. .history/mbench/gpt_ref-ytvos_numbered_cy_20250201140559.py +461 -0
  22. .history/mbench/gpt_ref-ytvos_numbered_cy_20250201141240.py +460 -0
  23. .history/mbench/gpt_ref-ytvos_numbered_cy_sanity_2_20250207172754.py +656 -0
  24. .history/mbench/make_ref-ytvos_json_20250113182322.py +100 -0
  25. .history/mbench/make_ref-ytvos_json_20250113182734.py +102 -0
  26. .history/mbench/make_ref-ytvos_json_20250113182817.py +103 -0
  27. .history/mbench/make_ref-ytvos_json_20250113182842.py +102 -0
  28. .history/mbench/make_ref-ytvos_json_20250113183130.py +102 -0
  29. .history/mbench/make_ref-ytvos_json_20250116141513.py +103 -0
  30. .history/mbench/make_ref-ytvos_json_20250118024325.py +108 -0
  31. .history/mbench/ytvos_ref_20250121152309.py +264 -0
  32. .history/mbench_a2d/gpt_a2d_numbered_20250205111640.py +82 -0
  33. .history/mbench_a2d/gpt_a2d_numbered_20250205122340.py +196 -0
  34. .history/mbench_a2d/gpt_a2d_numbered_20250205152326.py +200 -0
  35. .history/mbench_a2d/gpt_a2d_numbered_20250207110257.py +213 -0
  36. .history/slurm_script/jupyter_20250121151552.sh +16 -0
  37. .history/slurm_script/jupyter_20250121151643.sh +16 -0
  38. .history/slurm_script/mbench_gpt_a2d_20250205122515.sh +19 -0
  39. .history/slurm_script/mbench_gpt_ref-ytvos-revised_20250121155940.sh +18 -0
  40. .history/slurm_script/mbench_gpt_ref-ytvos-revised_20250121160841.sh +18 -0
  41. .history/slurm_script/mbench_gpt_ref-ytvos-revised_20250124085144.sh +18 -0
  42. .history/slurm_script/mbench_gpt_ref-ytvos_20250119070944.sh +18 -0
  43. .history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250130190228.sh +20 -0
  44. .history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250201140706.sh +20 -0
  45. .history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250202183206.sh +20 -0
  46. .history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250207171604.sh +20 -0
  47. .history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250207172920.sh +20 -0
  48. hf_cache/.locks/models--zhiqiulin--clip-flant5-xxl/ca26d90c9e8e071d0bc31b570aef68306d0be1db4330471d10a117061a15a991.lock +0 -0
  49. hf_cache/models--zhiqiulin--clip-flant5-xxl/.no_exist/89bad6fffe1126b24d4360c1e1f69145eb6103aa/pytorch_model.bin +0 -0
  50. hf_cache/models--zhiqiulin--clip-flant5-xxl/blobs/12acb5074c883dcab3e166d86d20130615ff83b0d26736ee046f4184202ebd3b +3 -0
.gitattributes CHANGED
@@ -46,3 +46,4 @@ LAVT-RIS/refer/data/refcocog/instances.json filter=lfs diff=lfs merge=lfs -text
  LAVT-RIS/refer/data/refcocog/refs(google).p filter=lfs diff=lfs merge=lfs -text
  LAVT-RIS/refer/data/refcocog/refs(umd).p filter=lfs diff=lfs merge=lfs -text
  LAVT-RIS/refer/evaluation/tokenizer/stanford-corenlp-3.4.1.jar filter=lfs diff=lfs merge=lfs -text
+ hf_cache/models--zhiqiulin--clip-flant5-xxl/blobs/12acb5074c883dcab3e166d86d20130615ff83b0d26736ee046f4184202ebd3b filter=lfs diff=lfs merge=lfs -text
.history/datasets/a2d_20250203174308.py ADDED
@@ -0,0 +1,247 @@
+ """
+ A2D-Sentences data loader
+ modified from https://github.com/mttr2021/MTTR/blob/main/datasets/a2d_sentences/a2d_sentences_dataset.py
+ """
+ from pathlib import Path
+
+ import torch
+ from torchvision.io import read_video
+ import torchvision.transforms.functional as F
+
+ from torch.utils.data import Dataset
+ import datasets.transforms_video as T
+
+ import os
+ from PIL import Image
+ import json
+ import numpy as np
+ import random
+
+ import h5py
+ from pycocotools.mask import encode, area
+
+
+ def get_image_id(video_id, frame_idx, ref_instance_a2d_id):
+     image_id = f'v_{video_id}_f_{frame_idx}_i_{ref_instance_a2d_id}'
+     return image_id
+
+ class A2DSentencesDataset(Dataset):
+     """
+     A Torch dataset for A2D-Sentences.
+     For more information check out: https://kgavrilyuk.github.io/publication/actor_action/ or the original paper at:
+     https://arxiv.org/abs/1803.07485
+     """
+     def __init__(self, image_folder: Path, ann_file: Path, transforms, return_masks: bool,
+                  num_frames: int, max_skip: int, subset):
+         super(A2DSentencesDataset, self).__init__()
+         dataset_path = str(image_folder)
+         self.mask_annotations_dir = os.path.join(dataset_path, 'text_annotations/a2d_annotation_with_instances')
+         self.videos_dir = os.path.join(dataset_path, 'Release/clips320H')
+         self.ann_file = ann_file
+         self.text_annotations = self.get_text_annotations()
+
+         self._transforms = transforms
+         self.return_masks = return_masks  # not used
+         self.num_frames = num_frames
+         self.max_skip = max_skip
+         self.subset = subset
+
+         print(f'\n {subset} sample num: ', len(self.text_annotations))
+         print('\n')
+
+     def get_text_annotations(self):
+         with open(str(self.ann_file), 'r') as f:
+             text_annotations_by_frame = [tuple(a) for a in json.load(f)]
+             return text_annotations_by_frame
+
+     @staticmethod
+     def bounding_box(img):
+         rows = np.any(img, axis=1)
+         cols = np.any(img, axis=0)
+         rmin, rmax = np.where(rows)[0][[0, -1]]
+         cmin, cmax = np.where(cols)[0][[0, -1]]
+         return rmin, rmax, cmin, cmax  # y1, y2, x1, x2
+
+     def __len__(self):
+         return len(self.text_annotations)
+
+     def __getitem__(self, idx):
+         instance_check = False
+         while not instance_check:
+             text_query, video_id, frame_idx, instance_id = self.text_annotations[idx]
+
+             text_query = " ".join(text_query.lower().split())  # clean up the text query
+
+             # read the source window frames:
+             video_frames, _, _ = read_video(os.path.join(self.videos_dir, f'{video_id}.mp4'), pts_unit='sec')  # (T, H, W, C)
+             vid_len = len(video_frames)
+             # note that the original a2d dataset is 1 indexed, so we have to subtract 1 from frame_idx
+             frame_id = frame_idx - 1
+
+             if self.subset == 'train':
+                 # get a window of window_size frames with frame frame_id in the middle.
+                 num_frames = self.num_frames
+                 # random sparse sample
+                 sample_indx = [frame_id]
+                 # local sample
+                 sample_id_before = random.randint(1, 3)
+                 sample_id_after = random.randint(1, 3)
+                 local_indx = [max(0, frame_id - sample_id_before), min(vid_len - 1, frame_id + sample_id_after)]
+                 sample_indx.extend(local_indx)
+
+                 # global sampling
+                 if num_frames > 3:
+                     all_inds = list(range(vid_len))
+                     global_inds = all_inds[:min(sample_indx)] + all_inds[max(sample_indx):]
+                     global_n = num_frames - len(sample_indx)
+                     if len(global_inds) > global_n:
+                         select_id = random.sample(range(len(global_inds)), global_n)
+                         for s_id in select_id:
+                             sample_indx.append(global_inds[s_id])
+                     elif vid_len >= global_n:  # sample long range global frames
+                         select_id = random.sample(range(vid_len), global_n)
+                         for s_id in select_id:
+                             sample_indx.append(all_inds[s_id])
+                     else:
+                         select_id = random.sample(range(vid_len), global_n - vid_len) + list(range(vid_len))
+                         for s_id in select_id:
+                             sample_indx.append(all_inds[s_id])
+                 sample_indx.sort()
+                 # find the valid frame index in sampled frame list, there is only one valid frame
+                 valid_indices = sample_indx.index(frame_id)
+
+             elif self.subset == 'val':
+                 start_idx, end_idx = frame_id - self.num_frames // 2, frame_id + (self.num_frames + 1) // 2
+                 sample_indx = []
+                 for i in range(start_idx, end_idx):
+                     i = min(max(i, 0), len(video_frames) - 1)  # pad out of range indices with edge frames
+                     sample_indx.append(i)
+                 sample_indx.sort()
+                 # find the valid frame index in sampled frame list, there is only one valid frame
+                 valid_indices = sample_indx.index(frame_id)
+
+             # read frames
+             imgs, labels, boxes, masks, valid = [], [], [], [], []
+             for j in range(self.num_frames):
+                 frame_indx = sample_indx[j]
+                 img = F.to_pil_image(video_frames[frame_indx].permute(2, 0, 1))
+                 imgs.append(img)
+
+             # read the instance mask
+             frame_annot_path = os.path.join(self.mask_annotations_dir, video_id, f'{frame_idx:05d}.h5')
+             f = h5py.File(frame_annot_path)
+             instances = list(f['instance'])
+             instance_idx = instances.index(instance_id)  # existence was already validated during init
+
+             instance_masks = np.array(f['reMask'])
+             if len(instances) == 1:
+                 instance_masks = instance_masks[np.newaxis, ...]
+             instance_masks = torch.tensor(instance_masks).transpose(1, 2)
+             mask_rles = [encode(mask) for mask in instance_masks.numpy()]
+             mask_areas = area(mask_rles).astype(float)
+             f.close()
+
+             # select the referred mask
+             label = torch.tensor(0, dtype=torch.long)
+             mask = instance_masks[instance_idx].numpy()
+             if (mask > 0).any():
+                 y1, y2, x1, x2 = self.bounding_box(mask)
+                 box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
+                 valid.append(1)
+             else:  # some frame didn't contain the instance
+                 box = torch.tensor([0, 0, 0, 0]).to(torch.float)
+                 valid.append(0)
+             mask = torch.from_numpy(mask)
+             labels.append(label)
+             boxes.append(box)
+             masks.append(mask)
+
+             # transform
+             h, w = instance_masks.shape[-2:]
+             labels = torch.stack(labels, dim=0)
+             boxes = torch.stack(boxes, dim=0)
+             boxes[:, 0::2].clamp_(min=0, max=w)
+             boxes[:, 1::2].clamp_(min=0, max=h)
+             masks = torch.stack(masks, dim=0)
+             # there is only one valid frame
+             target = {
+                 'frames_idx': torch.tensor(sample_indx),  # [T,]
+                 'valid_indices': torch.tensor([valid_indices]),
+                 'labels': labels,  # [1,]
+                 'boxes': boxes,  # [1, 4], xyxy
+                 'masks': masks,  # [1, H, W]
+                 'valid': torch.tensor(valid),  # [1,]
+                 'caption': text_query,
+                 'orig_size': torch.as_tensor([int(h), int(w)]),
+                 'size': torch.as_tensor([int(h), int(w)]),
+                 'image_id': get_image_id(video_id, frame_idx, instance_id)
+             }
+
+             # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
+             if self._transforms:
+                 imgs, target = self._transforms(imgs, target)
+                 imgs = torch.stack(imgs, dim=0)  # [T, 3, H, W]
+             else:
+                 imgs = np.array(imgs)
+                 imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
+
+             # FIXME: handle "valid", since some box may be removed due to random crop
+             if torch.any(target['valid'] == 1):  # at least one instance
+                 instance_check = True
+             else:
+                 idx = random.randint(0, self.__len__() - 1)
+
+         return imgs, target
+
+
+ def make_coco_transforms(image_set, max_size=640):
+     normalize = T.Compose([
+         T.ToTensor(),
+         T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+     ])
+
+     scales = [288, 320, 352, 392, 416, 448, 480, 512]
+
+     if image_set == 'train':
+         return T.Compose([
+             T.RandomHorizontalFlip(),
+             T.PhotometricDistort(),
+             T.RandomSelect(
+                 T.Compose([
+                     T.RandomResize(scales, max_size=max_size),
+                     T.Check(),
+                 ]),
+                 T.Compose([
+                     T.RandomResize([400, 500, 600]),
+                     T.RandomSizeCrop(384, 600),
+                     T.RandomResize(scales, max_size=max_size),
+                     T.Check(),
+                 ])
+             ),
+             normalize,
+         ])
+
+     # we do not use the 'val' set since the annotations are inaccessible
+     if image_set == 'val':
+         return T.Compose([
+             T.RandomResize([360], max_size=640),
+             normalize,
+         ])
+
+     raise ValueError(f'unknown {image_set}')
+
+
+ def build(image_set, args):
+     root = Path(args.a2d_path)
+     assert root.exists(), f'provided A2D-Sentences path {root} does not exist'
+     PATHS = {
+         "train": (root, root / "a2d_sentences_single_frame_train_annotations.json"),
+         "val": (root, root / "a2d_sentences_single_frame_test_annotations.json"),
+     }
+     img_folder, ann_file = PATHS[image_set]
+     # dataset = A2DSentencesDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size),
+     #                               return_masks=args.masks, num_frames=args.num_frames, max_skip=args.max_skip, subset=image_set)
+     dataset = A2DSentencesDataset(img_folder, ann_file, transforms=None,
+                                   return_masks=args.masks, num_frames=args.num_frames, max_skip=args.max_skip, subset=image_set)
+     return dataset
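
The train-time clip construction in A2DSentencesDataset.__getitem__ above is the densest part of this file: the single annotated frame is kept, a short local window (1-3 frames on either side) is added, and random global frames fill the clip up to num_frames. The sketch below restates just that sampling logic as a standalone helper; the function name, default, and example values are illustrative only, not part of the committed code.

# Minimal sketch of the train-time sampling in A2DSentencesDataset.__getitem__
# (illustrative names and defaults; not part of the commit).
import random

def sample_train_indices(vid_len: int, frame_id: int, num_frames: int = 5):
    sample_indx = [frame_id]
    # local window: one frame up to 3 steps before and after, clamped to the video
    before = random.randint(1, 3)
    after = random.randint(1, 3)
    sample_indx += [max(0, frame_id - before), min(vid_len - 1, frame_id + after)]
    # global fill-up: draw the remaining frames from outside the local window
    if num_frames > 3:
        all_inds = list(range(vid_len))
        global_inds = all_inds[:min(sample_indx)] + all_inds[max(sample_indx):]
        global_n = num_frames - len(sample_indx)
        if len(global_inds) > global_n:
            sample_indx += random.sample(global_inds, global_n)
        elif vid_len >= global_n:
            sample_indx += random.sample(all_inds, global_n)
        else:  # very short clip: allow repeated indices, as in the loader above
            sample_indx += random.sample(all_inds, global_n - vid_len) + all_inds
    sample_indx.sort()
    # position of the single annotated frame inside the sorted clip
    return sample_indx, sample_indx.index(frame_id)

# e.g. sample_train_indices(vid_len=40, frame_id=17) might return
# ([3, 15, 17, 19, 31], 2); the exact indices vary per call.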
.history/datasets/ytvos_ref_20250113131134.py ADDED
@@ -0,0 +1,241 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ for vid in self.videos:
61
+ vid_meta = subset_metas_by_video[vid]
62
+ vid_data = subset_expressions_by_video[vid]
63
+ vid_frames = sorted(vid_data['frames'])
64
+ vid_len = len(vid_frames)
65
+
66
+ print(vid_meta)
67
+
68
+ for exp_id, exp_dict in vid_data['expressions'].items():
69
+ print(exp_dict)
70
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
71
+ start_idx , end_idx = 2, vid_len-2
72
+ bin_size = (end_idx - start_idx) // 4
73
+
74
+ bins = []
75
+ for i in range(4):
76
+ bin_start = start_idx + i * bin_size
77
+ bin_end = bin_start + bin_size if i < 3 else end_idx
78
+
79
+ bins.append((bin_start, bin_end))
80
+
81
+
82
+ meta = {
83
+ 'video': vid,
84
+ 'exp': exp_dict['exp'],
85
+ 'obj_id': int(exp_dict['obj_id']),
86
+ 'frames': vid_frames,
87
+ 'bins': bins,
88
+ 'category': vid_meta['objects'][int(exp_dict['obj_id'])]['category']
89
+ }
90
+ self.metas.append(meta)
91
+
92
+
93
+ @staticmethod
94
+ def bounding_box(img):
95
+ rows = np.any(img, axis=1)
96
+ cols = np.any(img, axis=0)
97
+ rmin, rmax = np.where(rows)[0][[0, -1]]
98
+ cmin, cmax = np.where(cols)[0][[0, -1]]
99
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
100
+
101
+ def __len__(self):
102
+ return len(self.metas)
103
+
104
+ def __getitem__(self, idx):
105
+ instance_check = False
106
+ while not instance_check:
107
+ meta = self.metas[idx] # dict
108
+
109
+
110
+ video, exp, obj_id, category, frames, bins = \
111
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['bins']
112
+
113
+
114
+ # clean up the caption
115
+ exp = " ".join(exp.lower().split())
116
+ category_id = category_dict[category]
117
+ vid_len = len(frames)
118
+
119
+ # num_frames = self.num_frames
120
+
121
+ # Random sample one frame from each bin
122
+ sample_indx = []
123
+ for start_idx, end_idx in bins:
124
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
125
+ sample_indx.sort() # Ensure indices are in order
126
+
127
+ # read frames and masks
128
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
129
+ for frame_indx in sample_indx:
130
+ frame_name = frames[frame_indx]
131
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
132
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
133
+ img = Image.open(img_path).convert('RGB')
134
+ mask = Image.open(mask_path).convert('P')
135
+
136
+ # create the target
137
+ label = torch.tensor(category_id)
138
+ mask = np.array(mask)
139
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
140
+ if (mask > 0).any():
141
+ y1, y2, x1, x2 = self.bounding_box(mask)
142
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
143
+ valid.append(1)
144
+ else: # some frame didn't contain the instance
145
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
146
+ valid.append(0)
147
+ mask = torch.from_numpy(mask)
148
+
149
+ # append
150
+ imgs.append(img)
151
+ labels.append(label)
152
+ masks.append(mask)
153
+ boxes.append(box)
154
+
155
+ # transform
156
+ w, h = img.size
157
+ labels = torch.stack(labels, dim=0)
158
+ boxes = torch.stack(boxes, dim=0)
159
+ boxes[:, 0::2].clamp_(min=0, max=w)
160
+ boxes[:, 1::2].clamp_(min=0, max=h)
161
+ masks = torch.stack(masks, dim=0)
162
+ target = {
163
+ 'frames_idx': torch.tensor(sample_indx), # [T,]
164
+ 'labels': labels, # [T,]
165
+ 'boxes': boxes, # [T, 4], xyxy
166
+ 'masks': masks, # [T, H, W]
167
+ 'valid': torch.tensor(valid), # [T,]
168
+ 'caption': exp,
169
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
170
+ 'size': torch.as_tensor([int(h), int(w)])
171
+ }
172
+
173
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
174
+ if self._transforms:
175
+ imgs, target = self._transforms(imgs, target)
176
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
177
+ else:
178
+ imgs = np.array(imgs)
179
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
180
+
181
+
182
+ # FIXME: handle "valid", since some box may be removed due to random crop
183
+ if torch.any(target['valid'] == 1): # at least one instance
184
+ instance_check = True
185
+ else:
186
+ idx = random.randint(0, self.__len__() - 1)
187
+
188
+ return imgs, target
189
+
190
+
191
+ def make_coco_transforms(image_set, max_size=640):
192
+ normalize = T.Compose([
193
+ T.ToTensor(),
194
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
195
+ ])
196
+
197
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
198
+
199
+ if image_set == 'train':
200
+ return T.Compose([
201
+ T.RandomHorizontalFlip(),
202
+ T.PhotometricDistort(),
203
+ T.RandomSelect(
204
+ T.Compose([
205
+ T.RandomResize(scales, max_size=max_size),
206
+ T.Check(),
207
+ ]),
208
+ T.Compose([
209
+ T.RandomResize([400, 500, 600]),
210
+ T.RandomSizeCrop(384, 600),
211
+ T.RandomResize(scales, max_size=max_size),
212
+ T.Check(),
213
+ ])
214
+ ),
215
+ normalize,
216
+ ])
217
+
218
+ # we do not use the 'val' set since the annotations are inaccessible
219
+ if image_set == 'val':
220
+ return T.Compose([
221
+ T.RandomResize([360], max_size=640),
222
+ normalize,
223
+ ])
224
+
225
+ raise ValueError(f'unknown {image_set}')
226
+
227
+
228
+ def build(image_set, args):
229
+ root = Path(args.ytvos_path)
230
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
231
+ PATHS = {
232
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
233
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
234
+ }
235
+ img_folder, ann_file = PATHS[image_set]
236
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
237
+ # num_frames=args.num_frames, max_skip=args.max_skip)
238
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
239
+ num_frames=args.num_frames, max_skip=args.max_skip)
240
+ return dataset
241
+
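
Against the upstream Ref-YoutubeVOS loader, the key change in this snapshot is the bin-based sampling set up in prepare_metas and used in __getitem__: the first two and last two frames are excluded, the remaining range is split into four bins, and one frame is drawn per bin. A minimal sketch of that selection follows; the function name and example values are illustrative only.

# Sketch of the bin-based frame sampling used by YTVOSDataset above
# (illustrative helper, not part of the commit).
import random

def sample_from_bins(vid_len: int, num_bins: int = 4):
    start_idx, end_idx = 2, vid_len - 2          # drop frames 0-1 and the last two
    bin_size = (end_idx - start_idx) // num_bins
    bins = []
    for i in range(num_bins):
        bin_start = start_idx + i * bin_size
        bin_end = bin_start + bin_size if i < num_bins - 1 else end_idx
        bins.append((bin_start, bin_end))
    # one random frame per bin, kept in temporal order
    return sorted(random.randint(b_start, b_end - 1) for b_start, b_end in bins)

# e.g. sample_from_bins(36) might return [5, 13, 22, 30]

Note that for clips shorter than roughly ten frames the bins collapse and random.randint is called on an empty range; the later snapshot ytvos_ref_20250113163313.py guards against this by skipping videos with fewer than 11 frames.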
.history/datasets/ytvos_ref_20250113131327.py ADDED
@@ -0,0 +1,241 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ for vid in self.videos:
61
+ vid_meta = subset_metas_by_video[vid]
62
+ vid_data = subset_expressions_by_video[vid]
63
+ vid_frames = sorted(vid_data['frames'])
64
+ vid_len = len(vid_frames)
65
+
66
+ print(vid_meta)
67
+
68
+ for exp_id, exp_dict in vid_data['expressions'].items():
69
+ print(exp_dict)
70
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
71
+ start_idx , end_idx = 2, vid_len-2
72
+ bin_size = (end_idx - start_idx) // 4
73
+
74
+ bins = []
75
+ for i in range(4):
76
+ bin_start = start_idx + i * bin_size
77
+ bin_end = bin_start + bin_size if i < 3 else end_idx
78
+
79
+ bins.append((bin_start, bin_end))
80
+
81
+
82
+ meta = {
83
+ 'video': vid,
84
+ 'exp': exp_dict['exp'],
85
+ 'obj_id': int(exp_dict['obj_id']),
86
+ 'frames': vid_frames,
87
+ 'bins': bins,
88
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
89
+ }
90
+ self.metas.append(meta)
91
+
92
+
93
+ @staticmethod
94
+ def bounding_box(img):
95
+ rows = np.any(img, axis=1)
96
+ cols = np.any(img, axis=0)
97
+ rmin, rmax = np.where(rows)[0][[0, -1]]
98
+ cmin, cmax = np.where(cols)[0][[0, -1]]
99
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
100
+
101
+ def __len__(self):
102
+ return len(self.metas)
103
+
104
+ def __getitem__(self, idx):
105
+ instance_check = False
106
+ while not instance_check:
107
+ meta = self.metas[idx] # dict
108
+
109
+
110
+ video, exp, obj_id, category, frames, bins = \
111
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['bins']
112
+
113
+
114
+ # clean up the caption
115
+ exp = " ".join(exp.lower().split())
116
+ category_id = category_dict[category]
117
+ vid_len = len(frames)
118
+
119
+ # num_frames = self.num_frames
120
+
121
+ # Random sample one frame from each bin
122
+ sample_indx = []
123
+ for start_idx, end_idx in bins:
124
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
125
+ sample_indx.sort() # Ensure indices are in order
126
+
127
+ # read frames and masks
128
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
129
+ for frame_indx in sample_indx:
130
+ frame_name = frames[frame_indx]
131
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
132
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
133
+ img = Image.open(img_path).convert('RGB')
134
+ mask = Image.open(mask_path).convert('P')
135
+
136
+ # create the target
137
+ label = torch.tensor(category_id)
138
+ mask = np.array(mask)
139
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
140
+ if (mask > 0).any():
141
+ y1, y2, x1, x2 = self.bounding_box(mask)
142
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
143
+ valid.append(1)
144
+ else: # some frame didn't contain the instance
145
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
146
+ valid.append(0)
147
+ mask = torch.from_numpy(mask)
148
+
149
+ # append
150
+ imgs.append(img)
151
+ labels.append(label)
152
+ masks.append(mask)
153
+ boxes.append(box)
154
+
155
+ # transform
156
+ w, h = img.size
157
+ labels = torch.stack(labels, dim=0)
158
+ boxes = torch.stack(boxes, dim=0)
159
+ boxes[:, 0::2].clamp_(min=0, max=w)
160
+ boxes[:, 1::2].clamp_(min=0, max=h)
161
+ masks = torch.stack(masks, dim=0)
162
+ target = {
163
+ 'frames_idx': torch.tensor(sample_indx), # [T,]
164
+ 'labels': labels, # [T,]
165
+ 'boxes': boxes, # [T, 4], xyxy
166
+ 'masks': masks, # [T, H, W]
167
+ 'valid': torch.tensor(valid), # [T,]
168
+ 'caption': exp,
169
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
170
+ 'size': torch.as_tensor([int(h), int(w)])
171
+ }
172
+
173
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
174
+ if self._transforms:
175
+ imgs, target = self._transforms(imgs, target)
176
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
177
+ else:
178
+ imgs = np.array(imgs)
179
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
180
+
181
+
182
+ # FIXME: handle "valid", since some box may be removed due to random crop
183
+ if torch.any(target['valid'] == 1): # at least one instance
184
+ instance_check = True
185
+ else:
186
+ idx = random.randint(0, self.__len__() - 1)
187
+
188
+ return imgs, target
189
+
190
+
191
+ def make_coco_transforms(image_set, max_size=640):
192
+ normalize = T.Compose([
193
+ T.ToTensor(),
194
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
195
+ ])
196
+
197
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
198
+
199
+ if image_set == 'train':
200
+ return T.Compose([
201
+ T.RandomHorizontalFlip(),
202
+ T.PhotometricDistort(),
203
+ T.RandomSelect(
204
+ T.Compose([
205
+ T.RandomResize(scales, max_size=max_size),
206
+ T.Check(),
207
+ ]),
208
+ T.Compose([
209
+ T.RandomResize([400, 500, 600]),
210
+ T.RandomSizeCrop(384, 600),
211
+ T.RandomResize(scales, max_size=max_size),
212
+ T.Check(),
213
+ ])
214
+ ),
215
+ normalize,
216
+ ])
217
+
218
+ # we do not use the 'val' set since the annotations are inaccessible
219
+ if image_set == 'val':
220
+ return T.Compose([
221
+ T.RandomResize([360], max_size=640),
222
+ normalize,
223
+ ])
224
+
225
+ raise ValueError(f'unknown {image_set}')
226
+
227
+
228
+ def build(image_set, args):
229
+ root = Path(args.ytvos_path)
230
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
231
+ PATHS = {
232
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
233
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
234
+ }
235
+ img_folder, ann_file = PATHS[image_set]
236
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
237
+ # num_frames=args.num_frames, max_skip=args.max_skip)
238
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
239
+ num_frames=args.num_frames, max_skip=args.max_skip)
240
+ return dataset
241
+
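
The visible change from the previous snapshot is that the category lookup now indexes vid_meta['objects'] with exp_dict['obj_id'] directly instead of int(exp_dict['obj_id']). Since JSON object keys are always strings, the objects table in meta.json is keyed by string ids, while the pixel-level comparison (mask == obj_id) still wants the integer. The toy structures below are illustrative only, not the real meta.json.

# Illustration of the string-key vs. integer-id distinction (toy data only).
vid_meta = {'objects': {'1': {'category': 'person'}, '2': {'category': 'dog'}}}
exp_dict = {'exp': 'the dog on the right', 'obj_id': '2'}

category = vid_meta['objects'][exp_dict['obj_id']]['category']  # 'dog'
obj_id = int(exp_dict['obj_id'])                                 # 2, used for (mask == obj_id)
# vid_meta['objects'][int(exp_dict['obj_id'])] would raise KeyError: 2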
.history/datasets/ytvos_ref_20250113141118.py ADDED
@@ -0,0 +1,241 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ for vid in self.videos:
61
+ vid_meta = subset_metas_by_video[vid]
62
+ vid_data = subset_expressions_by_video[vid]
63
+ vid_frames = sorted(vid_data['frames'])
64
+ vid_len = len(vid_frames)
65
+ print(vid_meta)
66
+ print(vid_data)
67
+
68
+
69
+ for exp_id, exp_dict in vid_data['expressions'].items():
70
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
71
+ start_idx , end_idx = 2, vid_len-2
72
+ bin_size = (end_idx - start_idx) // 4
73
+
74
+ bins = []
75
+ for i in range(4):
76
+ bin_start = start_idx + i * bin_size
77
+ bin_end = bin_start + bin_size if i < 3 else end_idx
78
+
79
+ bins.append((bin_start, bin_end))
80
+
81
+
82
+ meta = {
83
+ 'video': vid,
84
+ 'exp': exp_dict['exp'],
85
+ 'obj_id': int(exp_dict['obj_id']),
86
+ 'frames': vid_frames,
87
+ 'bins': bins,
88
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
89
+ }
90
+ self.metas.append(meta)
91
+
92
+
93
+ @staticmethod
94
+ def bounding_box(img):
95
+ rows = np.any(img, axis=1)
96
+ cols = np.any(img, axis=0)
97
+ rmin, rmax = np.where(rows)[0][[0, -1]]
98
+ cmin, cmax = np.where(cols)[0][[0, -1]]
99
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
100
+
101
+ def __len__(self):
102
+ return len(self.metas)
103
+
104
+ def __getitem__(self, idx):
105
+ instance_check = False
106
+ while not instance_check:
107
+ meta = self.metas[idx] # dict
108
+
109
+
110
+ video, exp, obj_id, category, frames, bins = \
111
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['bins']
112
+
113
+
114
+ # clean up the caption
115
+ exp = " ".join(exp.lower().split())
116
+ category_id = category_dict[category]
117
+ vid_len = len(frames)
118
+
119
+ # num_frames = self.num_frames
120
+
121
+ # Random sample one frame from each bin
122
+ sample_indx = []
123
+ for start_idx, end_idx in bins:
124
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
125
+ sample_indx.sort() # Ensure indices are in order
126
+
127
+ # read frames and masks
128
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
129
+ for frame_indx in sample_indx:
130
+ frame_name = frames[frame_indx]
131
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
132
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
133
+ img = Image.open(img_path).convert('RGB')
134
+ mask = Image.open(mask_path).convert('P')
135
+
136
+ # create the target
137
+ label = torch.tensor(category_id)
138
+ mask = np.array(mask)
139
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
140
+ if (mask > 0).any():
141
+ y1, y2, x1, x2 = self.bounding_box(mask)
142
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
143
+ valid.append(1)
144
+ else: # some frame didn't contain the instance
145
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
146
+ valid.append(0)
147
+ mask = torch.from_numpy(mask)
148
+
149
+ # append
150
+ imgs.append(img)
151
+ labels.append(label)
152
+ masks.append(mask)
153
+ boxes.append(box)
154
+
155
+ # transform
156
+ w, h = img.size
157
+ labels = torch.stack(labels, dim=0)
158
+ boxes = torch.stack(boxes, dim=0)
159
+ boxes[:, 0::2].clamp_(min=0, max=w)
160
+ boxes[:, 1::2].clamp_(min=0, max=h)
161
+ masks = torch.stack(masks, dim=0)
162
+ target = {
163
+ 'frames_idx': torch.tensor(sample_indx), # [T,]
164
+ 'labels': labels, # [T,]
165
+ 'boxes': boxes, # [T, 4], xyxy
166
+ 'masks': masks, # [T, H, W]
167
+ 'valid': torch.tensor(valid), # [T,]
168
+ 'caption': exp,
169
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
170
+ 'size': torch.as_tensor([int(h), int(w)])
171
+ }
172
+
173
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
174
+ if self._transforms:
175
+ imgs, target = self._transforms(imgs, target)
176
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
177
+ else:
178
+ imgs = np.array(imgs)
179
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
180
+
181
+
182
+ # FIXME: handle "valid", since some box may be removed due to random crop
183
+ if torch.any(target['valid'] == 1): # at least one instance
184
+ instance_check = True
185
+ else:
186
+ idx = random.randint(0, self.__len__() - 1)
187
+
188
+ return imgs, target
189
+
190
+
191
+ def make_coco_transforms(image_set, max_size=640):
192
+ normalize = T.Compose([
193
+ T.ToTensor(),
194
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
195
+ ])
196
+
197
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
198
+
199
+ if image_set == 'train':
200
+ return T.Compose([
201
+ T.RandomHorizontalFlip(),
202
+ T.PhotometricDistort(),
203
+ T.RandomSelect(
204
+ T.Compose([
205
+ T.RandomResize(scales, max_size=max_size),
206
+ T.Check(),
207
+ ]),
208
+ T.Compose([
209
+ T.RandomResize([400, 500, 600]),
210
+ T.RandomSizeCrop(384, 600),
211
+ T.RandomResize(scales, max_size=max_size),
212
+ T.Check(),
213
+ ])
214
+ ),
215
+ normalize,
216
+ ])
217
+
218
+ # we do not use the 'val' set since the annotations are inaccessible
219
+ if image_set == 'val':
220
+ return T.Compose([
221
+ T.RandomResize([360], max_size=640),
222
+ normalize,
223
+ ])
224
+
225
+ raise ValueError(f'unknown {image_set}')
226
+
227
+
228
+ def build(image_set, args):
229
+ root = Path(args.ytvos_path)
230
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
231
+ PATHS = {
232
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
233
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
234
+ }
235
+ img_folder, ann_file = PATHS[image_set]
236
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
237
+ # num_frames=args.num_frames, max_skip=args.max_skip)
238
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
239
+ num_frames=args.num_frames, max_skip=args.max_skip)
240
+ return dataset
241
+
.history/datasets/ytvos_ref_20250113162417.py ADDED
@@ -0,0 +1,241 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ for vid in self.videos:
61
+ vid_meta = subset_metas_by_video[vid]
62
+ vid_data = subset_expressions_by_video[vid]
63
+ vid_frames = sorted(vid_data['frames'])
64
+ vid_len = len(vid_frames)
65
+
66
+ for exp_id, exp_dict in vid_data['expressions'].items():
67
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
68
+ start_idx , end_idx = 2, vid_len-2
69
+ bin_size = (end_idx - start_idx) // 4
70
+
71
+ bins = []
72
+ for i in range(4):
73
+ bin_start = start_idx + i * bin_size
74
+ bin_end = bin_start + bin_size if i < 3 else end_idx
75
+
76
+ bins.append((bin_start, bin_end))
77
+
78
+ # Random sample one frame from each bin
79
+ sample_indx = []
80
+ for start_idx, end_idx in bins:
81
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
82
+ sample_indx.sort() # Ensure indices are in order
83
+
84
+
85
+ for frame_id in sample_indx:
86
+ meta = {
87
+ 'video': vid,
88
+ 'exp': exp_dict['exp'],
89
+ 'obj_id': int(exp_dict['obj_id']),
90
+ 'frames': vid_frames,
91
+ 'frame_id' : frame_id,
92
+ 'sample_frames_id' : sample_indx,
93
+ 'bins': bins,
94
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
95
+ }
96
+ self.metas.append(meta)
97
+
98
+
99
+ @staticmethod
100
+ def bounding_box(img):
101
+ rows = np.any(img, axis=1)
102
+ cols = np.any(img, axis=0)
103
+ rmin, rmax = np.where(rows)[0][[0, -1]]
104
+ cmin, cmax = np.where(cols)[0][[0, -1]]
105
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
106
+
107
+ def __len__(self):
108
+ return len(self.metas)
109
+
110
+ def __getitem__(self, idx):
111
+ instance_check = False
112
+ while not instance_check:
113
+ meta = self.metas[idx] # dict
114
+
115
+
116
+ video, exp, obj_id, category, frames, frame_id, sample_frames_id, bins = \
117
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['frame_id'], meta['sample_frames_id'], meta['bins']
118
+
119
+
120
+ # clean up the caption
121
+ exp = " ".join(exp.lower().split())
122
+ category_id = category_dict[category]
123
+ vid_len = len(frames)
124
+
125
+ # num_frames = self.num_frames
126
+
127
+ # read frames and masks
128
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
129
+ for frame_indx in sample_frames_id:
130
+ frame_name = frames[frame_indx]
131
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
132
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
133
+ img = Image.open(img_path).convert('RGB')
134
+ mask = Image.open(mask_path).convert('P')
135
+
136
+ # create the target
137
+ label = torch.tensor(category_id)
138
+ mask = np.array(mask)
139
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
140
+ if (mask > 0).any():
141
+ y1, y2, x1, x2 = self.bounding_box(mask)
142
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
143
+ valid.append(1)
144
+ else: # some frame didn't contain the instance
145
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
146
+ valid.append(0)
147
+ mask = torch.from_numpy(mask)
148
+
149
+ # append
150
+ imgs.append(img)
151
+ labels.append(label)
152
+ masks.append(mask)
153
+ boxes.append(box)
154
+
155
+ # transform
156
+ w, h = img.size
157
+ labels = torch.stack(labels, dim=0)
158
+ boxes = torch.stack(boxes, dim=0)
159
+ boxes[:, 0::2].clamp_(min=0, max=w)
160
+ boxes[:, 1::2].clamp_(min=0, max=h)
161
+ masks = torch.stack(masks, dim=0)
162
+ target = {
163
+ 'frames_idx': torch.tensor(sample_frames_id), # [T,]
164
+ 'labels': labels, # [T,]
165
+ 'boxes': boxes, # [T, 4], xyxy
166
+ 'masks': masks, # [T, H, W]
167
+ 'valid': torch.tensor(valid), # [T,]
168
+ 'caption': exp,
169
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
170
+ 'size': torch.as_tensor([int(h), int(w)])
171
+ }
172
+
173
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
174
+ if self._transforms:
175
+ imgs, target = self._transforms(imgs, target)
176
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
177
+ else:
178
+ imgs = np.array(imgs)
179
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
180
+
181
+
182
+ # FIXME: handle "valid", since some box may be removed due to random crop
183
+ if torch.any(target['valid'] == 1): # at least one instance
184
+ instance_check = True
185
+ else:
186
+ idx = random.randint(0, self.__len__() - 1)
187
+
188
+ return imgs, target
189
+
190
+
191
+ def make_coco_transforms(image_set, max_size=640):
192
+ normalize = T.Compose([
193
+ T.ToTensor(),
194
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
195
+ ])
196
+
197
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
198
+
199
+ if image_set == 'train':
200
+ return T.Compose([
201
+ T.RandomHorizontalFlip(),
202
+ T.PhotometricDistort(),
203
+ T.RandomSelect(
204
+ T.Compose([
205
+ T.RandomResize(scales, max_size=max_size),
206
+ T.Check(),
207
+ ]),
208
+ T.Compose([
209
+ T.RandomResize([400, 500, 600]),
210
+ T.RandomSizeCrop(384, 600),
211
+ T.RandomResize(scales, max_size=max_size),
212
+ T.Check(),
213
+ ])
214
+ ),
215
+ normalize,
216
+ ])
217
+
218
+ # we do not use the 'val' set since the annotations are inaccessible
219
+ if image_set == 'val':
220
+ return T.Compose([
221
+ T.RandomResize([360], max_size=640),
222
+ normalize,
223
+ ])
224
+
225
+ raise ValueError(f'unknown {image_set}')
226
+
227
+
228
+ def build(image_set, args):
229
+ root = Path(args.ytvos_path)
230
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
231
+ PATHS = {
232
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
233
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
234
+ }
235
+ img_folder, ann_file = PATHS[image_set]
236
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
237
+ # num_frames=args.num_frames, max_skip=args.max_skip)
238
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
239
+ num_frames=args.num_frames, max_skip=args.max_skip)
240
+ return dataset
241
+
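
Since the same bounding_box helper is copied into every loader in this commit, a quick usage sketch may help when reviewing the mask-to-box conversion. The helper below is reproduced from the diff; the toy mask and the print call are illustrative only.

# Usage sketch for the shared bounding_box helper: tight box around the
# non-zero region of a binary mask, returned as (y1, y2, x1, x2).
import numpy as np

def bounding_box(img):
    rows = np.any(img, axis=1)
    cols = np.any(img, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    return rmin, rmax, cmin, cmax  # y1, y2, x1, x2

mask = np.zeros((6, 8), dtype=np.float32)
mask[2:4, 3:6] = 1.0           # object occupies rows 2-3, cols 3-5
print(bounding_box(mask))      # -> (2, 3, 3, 5)

In __getitem__ these values are then reordered into an xyxy box via torch.tensor([x1, y1, x2, y2]).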
.history/datasets/ytvos_ref_20250113163313.py ADDED
@@ -0,0 +1,248 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ skip_vid_count = 0
61
+
62
+ for vid in self.videos:
63
+ vid_meta = subset_metas_by_video[vid]
64
+ vid_data = subset_expressions_by_video[vid]
65
+ vid_frames = sorted(vid_data['frames'])
66
+ vid_len = len(vid_frames)
67
+
68
+ if vid_len < 11:
69
+ print(f"Too short video: {vid} with frame length {vid_len}")
70
+ skip_vid_count += 1
71
+ continue
72
+
73
+ for exp_id, exp_dict in vid_data['expressions'].items():
74
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
75
+ start_idx , end_idx = 2, vid_len-2
76
+ bin_size = (end_idx - start_idx) // 4
77
+
78
+ bins = []
79
+ for i in range(4):
80
+ bin_start = start_idx + i * bin_size
81
+ bin_end = bin_start + bin_size if i < 3 else end_idx
82
+
83
+ bins.append((bin_start, bin_end))
84
+
85
+ # Random sample one frame from each bin
86
+ sample_indx = []
87
+ for start_idx, end_idx in bins:
88
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
89
+ sample_indx.sort() # Ensure indices are in order
90
+
91
+
92
+ for frame_id in sample_indx:
93
+ meta = {
94
+ 'video': vid,
95
+ 'exp': exp_dict['exp'],
96
+ 'obj_id': int(exp_dict['obj_id']),
97
+ 'frames': vid_frames,
98
+ 'frame_id' : frame_id,
99
+ 'sample_frames_id' : sample_indx,
100
+ 'bins': bins,
101
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
102
+ }
103
+ self.metas.append(meta)
104
+
105
+
106
+ @staticmethod
107
+ def bounding_box(img):
108
+ rows = np.any(img, axis=1)
109
+ cols = np.any(img, axis=0)
110
+ rmin, rmax = np.where(rows)[0][[0, -1]]
111
+ cmin, cmax = np.where(cols)[0][[0, -1]]
112
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
113
+
114
+ def __len__(self):
115
+ return len(self.metas)
116
+
117
+ def __getitem__(self, idx):
118
+ instance_check = False
119
+ while not instance_check:
120
+ meta = self.metas[idx] # dict
121
+
122
+
123
+ video, exp, obj_id, category, frames, frame_id, sample_frames_id, bins = \
124
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['frame_id'], meta['sample_frames_id'], meta['bins']
125
+
126
+
127
+ # clean up the caption
128
+ exp = " ".join(exp.lower().split())
129
+ category_id = category_dict[category]
130
+ vid_len = len(frames)
131
+
132
+ # num_frames = self.num_frames
133
+
134
+ # read frames and masks
135
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
136
+ for frame_indx in sample_frames_id:
137
+ frame_name = frames[frame_indx]
138
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
139
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
140
+ img = Image.open(img_path).convert('RGB')
141
+ mask = Image.open(mask_path).convert('P')
142
+
143
+ # create the target
144
+ label = torch.tensor(category_id)
145
+ mask = np.array(mask)
146
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
147
+ if (mask > 0).any():
148
+ y1, y2, x1, x2 = self.bounding_box(mask)
149
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
150
+ valid.append(1)
151
+ else: # some frame didn't contain the instance
152
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
153
+ valid.append(0)
154
+ mask = torch.from_numpy(mask)
155
+
156
+ # append
157
+ imgs.append(img)
158
+ labels.append(label)
159
+ masks.append(mask)
160
+ boxes.append(box)
161
+
162
+ # transform
163
+ w, h = img.size
164
+ labels = torch.stack(labels, dim=0)
165
+ boxes = torch.stack(boxes, dim=0)
166
+ boxes[:, 0::2].clamp_(min=0, max=w)
167
+ boxes[:, 1::2].clamp_(min=0, max=h)
168
+ masks = torch.stack(masks, dim=0)
169
+ target = {
170
+ 'frames_idx': torch.tensor(sample_frames_id), # [T,]
171
+ 'labels': labels, # [T,]
172
+ 'boxes': boxes, # [T, 4], xyxy
173
+ 'masks': masks, # [T, H, W]
174
+ 'valid': torch.tensor(valid), # [T,]
175
+ 'caption': exp,
176
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
177
+ 'size': torch.as_tensor([int(h), int(w)])
178
+ }
179
+
180
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
181
+ if self._transforms:
182
+ imgs, target = self._transforms(imgs, target)
183
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
184
+ else:
185
+ imgs = np.array(imgs)
186
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
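+ # without transforms, raw PIL frames are stacked as uint8 and permuted from [T, H, W, C] to [T, C, H, W] (no normalization)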
187
+
188
+
189
+ # FIXME: handle "valid", since some box may be removed due to random crop
190
+ if torch.any(target['valid'] == 1): # at least one instance
191
+ instance_check = True
192
+ else:
193
+ idx = random.randint(0, self.__len__() - 1)
194
+
195
+ return imgs, target
196
+
197
+
198
+ def make_coco_transforms(image_set, max_size=640):
199
+ normalize = T.Compose([
200
+ T.ToTensor(),
201
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
202
+ ])
203
+
204
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
205
+
206
+ if image_set == 'train':
207
+ return T.Compose([
208
+ T.RandomHorizontalFlip(),
209
+ T.PhotometricDistort(),
210
+ T.RandomSelect(
211
+ T.Compose([
212
+ T.RandomResize(scales, max_size=max_size),
213
+ T.Check(),
214
+ ]),
215
+ T.Compose([
216
+ T.RandomResize([400, 500, 600]),
217
+ T.RandomSizeCrop(384, 600),
218
+ T.RandomResize(scales, max_size=max_size),
219
+ T.Check(),
220
+ ])
221
+ ),
222
+ normalize,
223
+ ])
224
+
225
+ # we do not use the 'val' set since the annotations are inaccessible
226
+ if image_set == 'val':
227
+ return T.Compose([
228
+ T.RandomResize([360], max_size=640),
229
+ normalize,
230
+ ])
231
+
232
+ raise ValueError(f'unknown {image_set}')
233
+
234
+
235
+ def build(image_set, args):
236
+ root = Path(args.ytvos_path)
237
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
238
+ PATHS = {
239
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
240
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
241
+ }
242
+ img_folder, ann_file = PATHS[image_set]
243
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
244
+ # num_frames=args.num_frames, max_skip=args.max_skip)
245
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
246
+ num_frames=args.num_frames, max_skip=args.max_skip)
247
+ return dataset
248
+
.history/datasets/ytvos_ref_20250114201904.py ADDED
@@ -0,0 +1,252 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ skip_vid_count = 0
61
+
62
+ for vid in self.videos:
63
+ vid_meta = subset_metas_by_video[vid]
64
+ vid_data = subset_expressions_by_video[vid]
65
+ vid_frames = sorted(vid_data['frames'])
66
+ vid_len = len(vid_frames)
67
+
68
+ if vid_len < 11:
69
+ #print(f"Too short video: {vid} with frame length {vid_len}")
70
+ skip_vid_count += 1
71
+ continue
72
+
73
+ print(f"vid_data: {vid_data}")
74
+ print(f"vid_meta: {vid_meta}")
75
+ for exp_id, exp_dict in vid_data['expressions'].items():
76
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
77
+ start_idx , end_idx = 2, vid_len-2
78
+ bin_size = (end_idx - start_idx) // 4
79
+
80
+ bins = []
81
+ for i in range(4):
82
+ bin_start = start_idx + i * bin_size
83
+ bin_end = bin_start + bin_size if i < 3 else end_idx
84
+
85
+ bins.append((bin_start, bin_end))
86
+
87
+ # Random sample one frame from each bin
88
+ sample_indx = []
89
+ for start_idx, end_idx in bins:
90
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
91
+ sample_indx.sort() # Ensure indices are in order
92
+
93
+
94
+ for sample_id in sample_indx:
95
+ meta = {
96
+ 'video': vid,
97
+ 'exp': exp_dict['exp'],
98
+ 'obj_id': int(exp_dict['obj_id']),
99
+ 'frames': vid_frames,
100
+ 'sample_id' : sample_id,
101
+ 'sample_frames_id' : sample_indx,
102
+ 'bins': bins,
103
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
104
+ }
105
+ self.metas.append(meta)
106
+
107
+ print(f"skipped {skip_vid_count} short videos")
108
+
109
+
110
+ @staticmethod
111
+ def bounding_box(img):
112
+ rows = np.any(img, axis=1)
113
+ cols = np.any(img, axis=0)
114
+ rmin, rmax = np.where(rows)[0][[0, -1]]
115
+ cmin, cmax = np.where(cols)[0][[0, -1]]
116
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
117
+
118
+ def __len__(self):
119
+ return len(self.metas)
120
+
121
+ def __getitem__(self, idx):
122
+ instance_check = False
123
+ while not instance_check:
124
+ meta = self.metas[idx] # dict
125
+
126
+
127
+ video, exp, obj_id, category, frames, sample_id, sample_frames_id, bins = \
128
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['sample_id'], meta['sample_frames_id'], meta['bins']
129
+
130
+
131
+ # clean up the caption
132
+ exp = " ".join(exp.lower().split())
133
+ category_id = category_dict[category]
134
+ vid_len = len(frames)
135
+
136
+ # num_frames = self.num_frames
137
+
138
+ # read frames and masks
139
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
140
+ for frame_indx in sample_frames_id:
141
+ frame_name = frames[frame_indx]
142
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
143
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
144
+ img = Image.open(img_path).convert('RGB')
145
+ mask = Image.open(mask_path).convert('P')
146
+
147
+ # create the target
148
+ label = torch.tensor(category_id)
149
+ mask = np.array(mask)
150
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
151
+ if (mask > 0).any():
152
+ y1, y2, x1, x2 = self.bounding_box(mask)
153
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
154
+ valid.append(1)
155
+ else: # some frame didn't contain the instance
156
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
157
+ valid.append(0)
158
+ mask = torch.from_numpy(mask)
159
+
160
+ # append
161
+ imgs.append(img)
162
+ labels.append(label)
163
+ masks.append(mask)
164
+ boxes.append(box)
165
+
166
+ # transform
167
+ w, h = img.size
168
+ labels = torch.stack(labels, dim=0)
169
+ boxes = torch.stack(boxes, dim=0)
170
+ boxes[:, 0::2].clamp_(min=0, max=w)
171
+ boxes[:, 1::2].clamp_(min=0, max=h)
172
+ masks = torch.stack(masks, dim=0)
173
+ target = {
174
+ 'frames_idx': torch.tensor(sample_frames_id), # [T,]
175
+ 'labels': labels, # [T,]
176
+ 'boxes': boxes, # [T, 4], xyxy
177
+ 'masks': masks, # [T, H, W]
178
+ 'valid': torch.tensor(valid), # [T,]
179
+ 'caption': exp,
180
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
181
+ 'size': torch.as_tensor([int(h), int(w)])
182
+ }
183
+
184
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
185
+ if self._transforms:
186
+ imgs, target = self._transforms(imgs, target)
187
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
188
+ else:
189
+ imgs = np.array(imgs)
190
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
191
+
192
+
193
+ # FIXME: handle "valid", since some box may be removed due to random crop
194
+ if torch.any(target['valid'] == 1): # at least one instance
195
+ instance_check = True
196
+ else:
197
+ idx = random.randint(0, self.__len__() - 1)
198
+
199
+ return imgs, target
200
+
201
+
202
+ def make_coco_transforms(image_set, max_size=640):
203
+ normalize = T.Compose([
204
+ T.ToTensor(),
205
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
206
+ ])
207
+
208
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
209
+
210
+ if image_set == 'train':
211
+ return T.Compose([
212
+ T.RandomHorizontalFlip(),
213
+ T.PhotometricDistort(),
214
+ T.RandomSelect(
215
+ T.Compose([
216
+ T.RandomResize(scales, max_size=max_size),
217
+ T.Check(),
218
+ ]),
219
+ T.Compose([
220
+ T.RandomResize([400, 500, 600]),
221
+ T.RandomSizeCrop(384, 600),
222
+ T.RandomResize(scales, max_size=max_size),
223
+ T.Check(),
224
+ ])
225
+ ),
226
+ normalize,
227
+ ])
228
+
229
+ # we do not use the 'val' set since the annotations are inaccessible
230
+ if image_set == 'val':
231
+ return T.Compose([
232
+ T.RandomResize([360], max_size=640),
233
+ normalize,
234
+ ])
235
+
236
+ raise ValueError(f'unknown {image_set}')
237
+
238
+
239
+ def build(image_set, args):
240
+ root = Path(args.ytvos_path)
241
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
242
+ PATHS = {
243
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
244
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
245
+ }
246
+ img_folder, ann_file = PATHS[image_set]
247
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
248
+ # num_frames=args.num_frames, max_skip=args.max_skip)
249
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
250
+ num_frames=args.num_frames, max_skip=args.max_skip)
251
+ return dataset
252
+
.history/datasets/ytvos_ref_20250114201908.py ADDED
@@ -0,0 +1,253 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ skip_vid_count = 0
61
+
62
+ for vid in self.videos:
63
+ vid_meta = subset_metas_by_video[vid]
64
+ vid_data = subset_expressions_by_video[vid]
65
+ vid_frames = sorted(vid_data['frames'])
66
+ vid_len = len(vid_frames)
67
+
68
+ if vid_len < 11:
69
+ #print(f"Too short video: {vid} with frame length {vid_len}")
70
+ skip_vid_count += 1
71
+ continue
72
+
73
+ print(f"vid_data: {vid_data}")
74
+ print(f"vid_meta: {vid_meta}")
75
+
76
+ for exp_id, exp_dict in vid_data['expressions'].items():
77
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
78
+ start_idx , end_idx = 2, vid_len-2
79
+ bin_size = (end_idx - start_idx) // 4
80
+
81
+ bins = []
82
+ for i in range(4):
83
+ bin_start = start_idx + i * bin_size
84
+ bin_end = bin_start + bin_size if i < 3 else end_idx
85
+
86
+ bins.append((bin_start, bin_end))
87
+
88
+ # Random sample one frame from each bin
89
+ sample_indx = []
90
+ for start_idx, end_idx in bins:
91
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
92
+ sample_indx.sort() # Ensure indices are in order
93
+
94
+
95
+ for sample_id in sample_indx:
96
+ meta = {
97
+ 'video': vid,
98
+ 'exp': exp_dict['exp'],
99
+ 'obj_id': int(exp_dict['obj_id']),
100
+ 'frames': vid_frames,
101
+ 'sample_id' : sample_id,
102
+ 'sample_frames_id' : sample_indx,
103
+ 'bins': bins,
104
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
105
+ }
106
+ self.metas.append(meta)
107
+
108
+ print(f"skipped {skip_vid_count} short videos")
109
+
110
+
111
+ @staticmethod
112
+ def bounding_box(img):
113
+ rows = np.any(img, axis=1)
114
+ cols = np.any(img, axis=0)
115
+ rmin, rmax = np.where(rows)[0][[0, -1]]
116
+ cmin, cmax = np.where(cols)[0][[0, -1]]
117
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
118
+
119
+ def __len__(self):
120
+ return len(self.metas)
121
+
122
+ def __getitem__(self, idx):
123
+ instance_check = False
124
+ while not instance_check:
125
+ meta = self.metas[idx] # dict
126
+
127
+
128
+ video, exp, obj_id, category, frames, sample_id, sample_frames_id, bins = \
129
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['sample_id'], meta['sample_frames_id'], meta['bins']
130
+
131
+
132
+ # clean up the caption
133
+ exp = " ".join(exp.lower().split())
134
+ category_id = category_dict[category]
135
+ vid_len = len(frames)
136
+
137
+ # num_frames = self.num_frames
138
+
139
+ # read frames and masks
140
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
141
+ for frame_indx in sample_frames_id:
142
+ frame_name = frames[frame_indx]
143
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
144
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
145
+ img = Image.open(img_path).convert('RGB')
146
+ mask = Image.open(mask_path).convert('P')
147
+
148
+ # create the target
149
+ label = torch.tensor(category_id)
150
+ mask = np.array(mask)
151
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
152
+ if (mask > 0).any():
153
+ y1, y2, x1, x2 = self.bounding_box(mask)
154
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
155
+ valid.append(1)
156
+ else: # some frame didn't contain the instance
157
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
158
+ valid.append(0)
159
+ mask = torch.from_numpy(mask)
160
+
161
+ # append
162
+ imgs.append(img)
163
+ labels.append(label)
164
+ masks.append(mask)
165
+ boxes.append(box)
166
+
167
+ # transform
168
+ w, h = img.size
169
+ labels = torch.stack(labels, dim=0)
170
+ boxes = torch.stack(boxes, dim=0)
171
+ boxes[:, 0::2].clamp_(min=0, max=w)
172
+ boxes[:, 1::2].clamp_(min=0, max=h)
173
+ masks = torch.stack(masks, dim=0)
174
+ target = {
175
+ 'frames_idx': torch.tensor(sample_frames_id), # [T,]
176
+ 'labels': labels, # [T,]
177
+ 'boxes': boxes, # [T, 4], xyxy
178
+ 'masks': masks, # [T, H, W]
179
+ 'valid': torch.tensor(valid), # [T,]
180
+ 'caption': exp,
181
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
182
+ 'size': torch.as_tensor([int(h), int(w)])
183
+ }
184
+
185
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
186
+ if self._transforms:
187
+ imgs, target = self._transforms(imgs, target)
188
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
189
+ else:
190
+ imgs = np.array(imgs)
191
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
192
+
193
+
194
+ # FIXME: handle "valid", since some box may be removed due to random crop
195
+ if torch.any(target['valid'] == 1): # at least one instance
196
+ instance_check = True
197
+ else:
198
+ idx = random.randint(0, self.__len__() - 1)
199
+
200
+ return imgs, target
201
+
202
+
203
+ def make_coco_transforms(image_set, max_size=640):
204
+ normalize = T.Compose([
205
+ T.ToTensor(),
206
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
207
+ ])
208
+
209
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
210
+
211
+ if image_set == 'train':
212
+ return T.Compose([
213
+ T.RandomHorizontalFlip(),
214
+ T.PhotometricDistort(),
215
+ T.RandomSelect(
216
+ T.Compose([
217
+ T.RandomResize(scales, max_size=max_size),
218
+ T.Check(),
219
+ ]),
220
+ T.Compose([
221
+ T.RandomResize([400, 500, 600]),
222
+ T.RandomSizeCrop(384, 600),
223
+ T.RandomResize(scales, max_size=max_size),
224
+ T.Check(),
225
+ ])
226
+ ),
227
+ normalize,
228
+ ])
229
+
230
+ # we do not use the 'val' set since the annotations are inaccessible
231
+ if image_set == 'val':
232
+ return T.Compose([
233
+ T.RandomResize([360], max_size=640),
234
+ normalize,
235
+ ])
236
+
237
+ raise ValueError(f'unknown {image_set}')
238
+
239
+
240
+ def build(image_set, args):
241
+ root = Path(args.ytvos_path)
242
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
243
+ PATHS = {
244
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
245
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
246
+ }
247
+ img_folder, ann_file = PATHS[image_set]
248
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
249
+ # num_frames=args.num_frames, max_skip=args.max_skip)
250
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
251
+ num_frames=args.num_frames, max_skip=args.max_skip)
252
+ return dataset
253
+
.history/datasets/ytvos_ref_20250114202340.py ADDED
@@ -0,0 +1,251 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.vid_data, self.vid_meta = self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ skip_vid_count = 0
61
+
62
+ for vid in self.videos:
63
+ vid_meta = subset_metas_by_video[vid]
64
+ vid_data = subset_expressions_by_video[vid]
65
+ vid_frames = sorted(vid_data['frames'])
66
+ vid_len = len(vid_frames)
67
+
68
+ if vid_len < 11:
69
+ #print(f"Too short video: {vid} with frame length {vid_len}")
70
+ skip_vid_count += 1
71
+ continue
72
+
73
+ return vid_meta, vid_data
74
+ for exp_id, exp_dict in vid_data['expressions'].items():
75
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
76
+ start_idx , end_idx = 2, vid_len-2
77
+ bin_size = (end_idx - start_idx) // 4
78
+
79
+ bins = []
80
+ for i in range(4):
81
+ bin_start = start_idx + i * bin_size
82
+ bin_end = bin_start + bin_size if i < 3 else end_idx
83
+
84
+ bins.append((bin_start, bin_end))
85
+
86
+ # Random sample one frame from each bin
87
+ sample_indx = []
88
+ for start_idx, end_idx in bins:
89
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
90
+ sample_indx.sort() # Ensure indices are in order
91
+
92
+
93
+ for sample_id in sample_indx:
94
+ meta = {
95
+ 'video': vid,
96
+ 'exp': exp_dict['exp'],
97
+ 'obj_id': int(exp_dict['obj_id']),
98
+ 'frames': vid_frames,
99
+ 'sample_id' : sample_id,
100
+ 'sample_frames_id' : sample_indx,
101
+ 'bins': bins,
102
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
103
+ }
104
+ self.metas.append(meta)
105
+
106
+ print(f"skipped {skip_vid_count} short videos")
107
+
108
+
109
+ @staticmethod
110
+ def bounding_box(img):
111
+ rows = np.any(img, axis=1)
112
+ cols = np.any(img, axis=0)
113
+ rmin, rmax = np.where(rows)[0][[0, -1]]
114
+ cmin, cmax = np.where(cols)[0][[0, -1]]
115
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
116
+
117
+ def __len__(self):
118
+ return len(self.metas)
119
+
120
+ def __getitem__(self, idx):
121
+ instance_check = False
122
+ while not instance_check:
123
+ meta = self.metas[idx] # dict
124
+
125
+
126
+ video, exp, obj_id, category, frames, sample_id, sample_frames_id, bins = \
127
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['sample_id'], meta['sample_frames_id'], meta['bins']
128
+
129
+
130
+ # clean up the caption
131
+ exp = " ".join(exp.lower().split())
132
+ category_id = category_dict[category]
133
+ vid_len = len(frames)
134
+
135
+ # num_frames = self.num_frames
136
+
137
+ # read frames and masks
138
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
139
+ for frame_indx in sample_frames_id:
140
+ frame_name = frames[frame_indx]
141
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
142
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
143
+ img = Image.open(img_path).convert('RGB')
144
+ mask = Image.open(mask_path).convert('P')
145
+
146
+ # create the target
147
+ label = torch.tensor(category_id)
148
+ mask = np.array(mask)
149
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
150
+ if (mask > 0).any():
151
+ y1, y2, x1, x2 = self.bounding_box(mask)
152
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
153
+ valid.append(1)
154
+ else: # some frame didn't contain the instance
155
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
156
+ valid.append(0)
157
+ mask = torch.from_numpy(mask)
158
+
159
+ # append
160
+ imgs.append(img)
161
+ labels.append(label)
162
+ masks.append(mask)
163
+ boxes.append(box)
164
+
165
+ # transform
166
+ w, h = img.size
167
+ labels = torch.stack(labels, dim=0)
168
+ boxes = torch.stack(boxes, dim=0)
169
+ boxes[:, 0::2].clamp_(min=0, max=w)
170
+ boxes[:, 1::2].clamp_(min=0, max=h)
171
+ masks = torch.stack(masks, dim=0)
172
+ target = {
173
+ 'frames_idx': torch.tensor(sample_frames_id), # [T,]
174
+ 'labels': labels, # [T,]
175
+ 'boxes': boxes, # [T, 4], xyxy
176
+ 'masks': masks, # [T, H, W]
177
+ 'valid': torch.tensor(valid), # [T,]
178
+ 'caption': exp,
179
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
180
+ 'size': torch.as_tensor([int(h), int(w)])
181
+ }
182
+
183
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
184
+ if self._transforms:
185
+ imgs, target = self._transforms(imgs, target)
186
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
187
+ else:
188
+ imgs = np.array(imgs)
189
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
190
+
191
+
192
+ # FIXME: handle "valid", since some box may be removed due to random crop
193
+ if torch.any(target['valid'] == 1): # at leatst one instance
194
+ instance_check = True
195
+ else:
196
+ idx = random.randint(0, self.__len__() - 1)
197
+
198
+ return imgs, target
199
+
200
+
201
+ def make_coco_transforms(image_set, max_size=640):
202
+ normalize = T.Compose([
203
+ T.ToTensor(),
204
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
205
+ ])
206
+
207
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
208
+
209
+ if image_set == 'train':
210
+ return T.Compose([
211
+ T.RandomHorizontalFlip(),
212
+ T.PhotometricDistort(),
213
+ T.RandomSelect(
214
+ T.Compose([
215
+ T.RandomResize(scales, max_size=max_size),
216
+ T.Check(),
217
+ ]),
218
+ T.Compose([
219
+ T.RandomResize([400, 500, 600]),
220
+ T.RandomSizeCrop(384, 600),
221
+ T.RandomResize(scales, max_size=max_size),
222
+ T.Check(),
223
+ ])
224
+ ),
225
+ normalize,
226
+ ])
227
+
228
+ # we do not use the 'val' set since the annotations are inaccessible
229
+ if image_set == 'val':
230
+ return T.Compose([
231
+ T.RandomResize([360], max_size=640),
232
+ normalize,
233
+ ])
234
+
235
+ raise ValueError(f'unknown {image_set}')
236
+
237
+
238
+ def build(image_set, args):
239
+ root = Path(args.ytvos_path)
240
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
241
+ PATHS = {
242
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
243
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
244
+ }
245
+ img_folder, ann_file = PATHS[image_set]
246
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
247
+ # num_frames=args.num_frames, max_skip=args.max_skip)
248
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
249
+ num_frames=args.num_frames, max_skip=args.max_skip)
250
+ return dataset
251
+
.history/datasets/ytvos_ref_20250114205314.py ADDED
@@ -0,0 +1,250 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ skip_vid_count = 0
61
+
62
+ for vid in self.videos:
63
+ vid_meta = subset_metas_by_video[vid]
64
+ vid_data = subset_expressions_by_video[vid]
65
+ vid_frames = sorted(vid_data['frames'])
66
+ vid_len = len(vid_frames)
67
+
68
+ if vid_len < 11:
69
+ #print(f"Too short video: {vid} with frame length {vid_len}")
70
+ skip_vid_count += 1
71
+ continue
72
+
73
+ for exp_id, exp_dict in vid_data['expressions'].items():
74
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
75
+ start_idx , end_idx = 2, vid_len-2
76
+ bin_size = (end_idx - start_idx) // 4
77
+
78
+ bins = []
79
+ for i in range(4):
80
+ bin_start = start_idx + i * bin_size
81
+ bin_end = bin_start + bin_size if i < 3 else end_idx
82
+
83
+ bins.append((bin_start, bin_end))
84
+
85
+ # Random sample one frame from each bin
86
+ sample_indx = []
87
+ for start_idx, end_idx in bins:
88
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
89
+ sample_indx.sort() # Ensure indices are in order
90
+
91
+
92
+ for sample_id in sample_indx:
93
+ meta = {
94
+ 'video': vid,
95
+ 'exp': exp_dict['exp'],
96
+ 'obj_id': int(exp_dict['obj_id']),
97
+ 'frames': vid_frames,
98
+ 'sample_id' : sample_id,
99
+ 'sample_frames_id' : sample_indx,
100
+ 'bins': bins,
101
+ 'category': vid_meta['objects'][exp_dict['obj_id']]['category']
102
+ }
103
+ self.metas.append(meta)
104
+
105
+ print(f"skipped {skip_vid_count} short videos")
106
+
107
+
108
+ @staticmethod
109
+ def bounding_box(img):
110
+ rows = np.any(img, axis=1)
111
+ cols = np.any(img, axis=0)
112
+ rmin, rmax = np.where(rows)[0][[0, -1]]
113
+ cmin, cmax = np.where(cols)[0][[0, -1]]
114
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
115
+
116
+ def __len__(self):
117
+ return len(self.metas)
118
+
119
+ def __getitem__(self, idx):
120
+ instance_check = False
121
+ while not instance_check:
122
+ meta = self.metas[idx] # dict
123
+
124
+
125
+ video, exp, obj_id, category, frames, sample_id, sample_frames_id, bins = \
126
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['sample_id'], meta['sample_frames_id'], meta['bins']
127
+
128
+
129
+ # clean up the caption
130
+ exp = " ".join(exp.lower().split())
131
+ category_id = category_dict[category]
132
+ vid_len = len(frames)
133
+
134
+ # num_frames = self.num_frames
135
+
136
+ # read frames and masks
137
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
138
+ for frame_indx in sample_frames_id:
139
+ frame_name = frames[frame_indx]
140
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
141
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
142
+ img = Image.open(img_path).convert('RGB')
143
+ mask = Image.open(mask_path).convert('P')
144
+
145
+ # create the target
146
+ label = torch.tensor(category_id)
147
+ mask = np.array(mask)
148
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
149
+ if (mask > 0).any():
150
+ y1, y2, x1, x2 = self.bounding_box(mask)
151
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
152
+ valid.append(1)
153
+ else: # some frame didn't contain the instance
154
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
155
+ valid.append(0)
156
+ mask = torch.from_numpy(mask)
157
+
158
+ # append
159
+ imgs.append(img)
160
+ labels.append(label)
161
+ masks.append(mask)
162
+ boxes.append(box)
163
+
164
+ # transform
165
+ w, h = img.size
166
+ labels = torch.stack(labels, dim=0)
167
+ boxes = torch.stack(boxes, dim=0)
168
+ boxes[:, 0::2].clamp_(min=0, max=w)
169
+ boxes[:, 1::2].clamp_(min=0, max=h)
170
+ masks = torch.stack(masks, dim=0)
171
+ target = {
172
+ 'frames_idx': torch.tensor(sample_frames_id), # [T,]
173
+ 'labels': labels, # [T,]
174
+ 'boxes': boxes, # [T, 4], xyxy
175
+ 'masks': masks, # [T, H, W]
176
+ 'valid': torch.tensor(valid), # [T,]
177
+ 'caption': exp,
178
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
179
+ 'size': torch.as_tensor([int(h), int(w)])
180
+ }
181
+
182
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
183
+ if self._transforms:
184
+ imgs, target = self._transforms(imgs, target)
185
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
186
+ else:
187
+ imgs = np.array(imgs)
188
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
189
+
190
+
191
+ # FIXME: handle "valid", since some box may be removed due to random crop
192
+ if torch.any(target['valid'] == 1): # at least one instance
193
+ instance_check = True
194
+ else:
195
+ idx = random.randint(0, self.__len__() - 1)
196
+
197
+ return imgs, target
198
+
199
+
200
+ def make_coco_transforms(image_set, max_size=640):
201
+ normalize = T.Compose([
202
+ T.ToTensor(),
203
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
204
+ ])
205
+
206
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
207
+
208
+ if image_set == 'train':
209
+ return T.Compose([
210
+ T.RandomHorizontalFlip(),
211
+ T.PhotometricDistort(),
212
+ T.RandomSelect(
213
+ T.Compose([
214
+ T.RandomResize(scales, max_size=max_size),
215
+ T.Check(),
216
+ ]),
217
+ T.Compose([
218
+ T.RandomResize([400, 500, 600]),
219
+ T.RandomSizeCrop(384, 600),
220
+ T.RandomResize(scales, max_size=max_size),
221
+ T.Check(),
222
+ ])
223
+ ),
224
+ normalize,
225
+ ])
226
+
227
+ # we do not use the 'val' set since the annotations are inaccessible
228
+ if image_set == 'val':
229
+ return T.Compose([
230
+ T.RandomResize([360], max_size=640),
231
+ normalize,
232
+ ])
233
+
234
+ raise ValueError(f'unknown {image_set}')
235
+
236
+
237
+ def build(image_set, args):
238
+ root = Path(args.ytvos_path)
239
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
240
+ PATHS = {
241
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
242
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
243
+ }
244
+ img_folder, ann_file = PATHS[image_set]
245
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
246
+ # num_frames=args.num_frames, max_skip=args.max_skip)
247
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
248
+ num_frames=args.num_frames, max_skip=args.max_skip)
249
+ return dataset
250
+
.history/datasets/ytvos_ref_20250114211305.py ADDED
@@ -0,0 +1,252 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ skip_vid_count = 0
61
+
62
+ for vid in self.videos:
63
+ vid_meta = subset_metas_by_video[vid]
64
+ vid_data = subset_expressions_by_video[vid]
65
+ vid_frames = sorted(vid_data['frames'])
66
+ vid_len = len(vid_frames)
67
+
68
+ if vid_len < 11:
69
+ #print(f"Too short video: {vid} with frame length {vid_len}")
70
+ skip_vid_count += 1
71
+ continue
72
+
73
+
74
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
75
+ start_idx , end_idx = 2, vid_len-2
76
+ bin_size = (end_idx - start_idx) // 4
77
+
78
+ bins = []
79
+ for i in range(4):
80
+ bin_start = start_idx + i * bin_size
81
+ bin_end = bin_start + bin_size if i < 3 else end_idx
82
+
83
+ bins.append((bin_start, bin_end))
84
+
85
+ # Random sample one frame from each bin
86
+ sample_indx = []
87
+ for start_idx, end_idx in bins:
88
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
89
+ sample_indx.sort() # Ensure indices are in order
90
+
91
+
92
+ meta = {
93
+ 'video':vid,
94
+ 'sample_indx':sample_indx,
95
+ 'bins':bins
96
+ }
97
+ obj_id_cat = {}
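+ # collect the category of every referred object id once per video (shared across its expressions)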
98
+ for exp_id, exp_dict in vid_data['expressions'].items():
99
+ obj_id = exp_dict['obj_id']
100
+ print(obj_id, type(obj_id))
101
+ print(vid_meta['objects'].keys())
102
+ if obj_id not in obj_id_cat:
103
+ obj_id_cat[obj_id] = vid_meta['objects'][obj_id]['category']
104
+ meta['obj_id_cat'] = obj_id_cat
105
+ self.metas.append(meta)
106
+
107
+ print(f"skipped {skip_vid_count} short videos")
108
+
109
+
110
+ @staticmethod
111
+ def bounding_box(img):
112
+ rows = np.any(img, axis=1)
113
+ cols = np.any(img, axis=0)
114
+ rmin, rmax = np.where(rows)[0][[0, -1]]
115
+ cmin, cmax = np.where(cols)[0][[0, -1]]
116
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
117
+
118
+ def __len__(self):
119
+ return len(self.metas)
120
+
121
+ def __getitem__(self, idx):
122
+ instance_check = False
123
+ while not instance_check:
124
+ meta = self.metas[idx] # dict
125
+
126
+
127
+ video, exp, obj_id, category, frames, sample_id, sample_frames_id, bins = \
128
+ meta['video'], meta['exp'], meta['obj_id'], meta['category'], meta['frames'], meta['sample_id'], meta['sample_frames_id'], meta['bins']
129
+
130
+
131
+ # clean up the caption
132
+ exp = " ".join(exp.lower().split())
133
+ category_id = category_dict[category]
134
+ vid_len = len(frames)
135
+
136
+ # num_frames = self.num_frames
137
+
138
+ # read frames and masks
139
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
140
+ for frame_indx in sample_frames_id:
141
+ frame_name = frames[frame_indx]
142
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
143
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
144
+ img = Image.open(img_path).convert('RGB')
145
+ mask = Image.open(mask_path).convert('P')
146
+
147
+ # create the target
148
+ label = torch.tensor(category_id)
149
+ mask = np.array(mask)
150
+ mask = (mask==obj_id).astype(np.float32) # 0,1 binary
151
+ if (mask > 0).any():
152
+ y1, y2, x1, x2 = self.bounding_box(mask)
153
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
154
+ valid.append(1)
155
+ else: # some frame didn't contain the instance
156
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
157
+ valid.append(0)
158
+ mask = torch.from_numpy(mask)
159
+
160
+ # append
161
+ imgs.append(img)
162
+ labels.append(label)
163
+ masks.append(mask)
164
+ boxes.append(box)
165
+
166
+ # transform
167
+ w, h = img.size
168
+ labels = torch.stack(labels, dim=0)
169
+ boxes = torch.stack(boxes, dim=0)
170
+ boxes[:, 0::2].clamp_(min=0, max=w)
171
+ boxes[:, 1::2].clamp_(min=0, max=h)
172
+ masks = torch.stack(masks, dim=0)
173
+ target = {
174
+ 'frames_idx': torch.tensor(sample_frames_id), # [T,]
175
+ 'labels': labels, # [T,]
176
+ 'boxes': boxes, # [T, 4], xyxy
177
+ 'masks': masks, # [T, H, W]
178
+ 'valid': torch.tensor(valid), # [T,]
179
+ 'caption': exp,
180
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
181
+ 'size': torch.as_tensor([int(h), int(w)])
182
+ }
183
+
184
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
185
+ if self._transforms:
186
+ imgs, target = self._transforms(imgs, target)
187
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
188
+ else:
189
+ imgs = np.array(imgs)
190
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
191
+
192
+
193
+ # FIXME: handle "valid", since some box may be removed due to random crop
194
+ if torch.any(target['valid'] == 1): # at least one instance
195
+ instance_check = True
196
+ else:
197
+ idx = random.randint(0, self.__len__() - 1)
198
+
199
+ return imgs, target
200
+
201
+
202
+ def make_coco_transforms(image_set, max_size=640):
203
+ normalize = T.Compose([
204
+ T.ToTensor(),
205
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
206
+ ])
207
+
208
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
209
+
210
+ if image_set == 'train':
211
+ return T.Compose([
212
+ T.RandomHorizontalFlip(),
213
+ T.PhotometricDistort(),
214
+ T.RandomSelect(
215
+ T.Compose([
216
+ T.RandomResize(scales, max_size=max_size),
217
+ T.Check(),
218
+ ]),
219
+ T.Compose([
220
+ T.RandomResize([400, 500, 600]),
221
+ T.RandomSizeCrop(384, 600),
222
+ T.RandomResize(scales, max_size=max_size),
223
+ T.Check(),
224
+ ])
225
+ ),
226
+ normalize,
227
+ ])
228
+
229
+ # we do not use the 'val' set since the annotations are inaccessible
230
+ if image_set == 'val':
231
+ return T.Compose([
232
+ T.RandomResize([360], max_size=640),
233
+ normalize,
234
+ ])
235
+
236
+ raise ValueError(f'unknown {image_set}')
237
+
238
+
239
+ def build(image_set, args):
240
+ root = Path(args.ytvos_path)
241
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
242
+ PATHS = {
243
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
244
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
245
+ }
246
+ img_folder, ann_file = PATHS[image_set]
247
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
248
+ # num_frames=args.num_frames, max_skip=args.max_skip)
249
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
250
+ num_frames=args.num_frames, max_skip=args.max_skip)
251
+ return dataset
252
+
.history/datasets/ytvos_ref_20250116074326.py ADDED
@@ -0,0 +1,239 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.autograd.grad_mode import F
8
+ from torch.utils.data import Dataset
9
+ import datasets.transforms_video as T
10
+
11
+ import os
12
+ from PIL import Image
13
+ import json
14
+ import numpy as np
15
+ import random
16
+
17
+ from datasets.categories import ytvos_category_dict as category_dict
18
+
19
+
20
+ class YTVOSDataset(Dataset):
21
+ """
22
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
23
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
24
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
25
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
26
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
27
+ through the Youtube-VOS referring video object segmentation competition page at:
28
+ https://competitions.codalab.org/competitions/29139
29
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
30
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
31
+ currently only be done on the competition 'validation' subset using the competition's server, as
32
+ annotations were publicly released only for the 'train' subset of the competition.
33
+
34
+ """
35
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
36
+ num_frames: int, max_skip: int):
37
+ self.img_folder = img_folder
38
+ self.ann_file = ann_file
39
+ self._transforms = transforms
40
+ self.return_masks = return_masks # not used
41
+ self.num_frames = num_frames
42
+ self.max_skip = max_skip
43
+ # create video meta data
44
+ self.prepare_metas()
45
+
46
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
47
+ print('\n')
48
+
49
+ def prepare_metas(self):
50
+ # read object information
51
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
52
+ subset_metas_by_video = json.load(f)['videos']
53
+
54
+ # read expression data
55
+ with open(str(self.ann_file), 'r') as f:
56
+ subset_expressions_by_video = json.load(f)['videos']
57
+ self.videos = list(subset_expressions_by_video.keys())
58
+
59
+ self.metas = []
60
+ skip_vid_count = 0
61
+
62
+ for vid in self.videos:
63
+ vid_meta = subset_metas_by_video[vid]
64
+ vid_data = subset_expressions_by_video[vid]
65
+ vid_frames = sorted(vid_data['frames'])
66
+ vid_len = len(vid_frames)
67
+
68
+ if vid_len < 11:
69
+ #print(f"Too short video: {vid} with frame length {vid_len}")
70
+ skip_vid_count += 1
71
+ continue
72
+
73
+
74
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
75
+ start_idx , end_idx = 2, vid_len-2
76
+ bin_size = (end_idx - start_idx) // 4
77
+
78
+ bins = []
79
+ for i in range(4):
80
+ bin_start = start_idx + i * bin_size
81
+ bin_end = bin_start + bin_size if i < 3 else end_idx
82
+
83
+ bins.append((bin_start, bin_end))
84
+
85
+ # Random sample one frame from each bin
86
+ sample_indx = []
87
+ for start_idx, end_idx in bins:
88
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
89
+ sample_indx.sort() # Ensure indices are in order
90
+
91
+
92
+ meta = {
93
+ 'video':vid,
94
+ 'sample_indx':sample_indx,
95
+ 'bins':bins,
96
+ 'frames':vid_frames
97
+ }
98
+ obj_id_cat = {}
99
+ for exp_id, exp_dict in vid_data['expressions'].items():
100
+ obj_id = exp_dict['obj_id']
101
+ if obj_id not in obj_id_cat:
102
+ obj_id_cat[obj_id] = vid_meta['objects'][obj_id]['category']
103
+ meta['obj_id_cat'] = obj_id_cat
104
+ self.metas.append(meta)
105
+
106
+ print(f"skipped {skip_vid_count} short videos")
107
+
108
+
109
+ @staticmethod
110
+ def bounding_box(img):
111
+ rows = np.any(img, axis=1)
112
+ cols = np.any(img, axis=0)
113
+ rmin, rmax = np.where(rows)[0][[0, -1]]
114
+ cmin, cmax = np.where(cols)[0][[0, -1]]
115
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
116
+
117
+ def __len__(self):
118
+ return len(self.metas)
119
+
120
+ def __getitem__(self, idx):
121
+ meta = self.metas[idx] # dict
122
+
123
+ video, sample_indx, bins, frames, obj_id_cat = \
124
+ meta['video'], meta['sample_indx'], meta['bins'], meta['frames'], meta['obj_id_cat']
125
+
126
+ # read frames and masks
127
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
128
+ for frame_indx in sample_indx:
129
+ frame_name = frames[frame_indx]
130
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
131
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
132
+ img = Image.open(img_path).convert('RGB')
133
+ imgs.append(img)
134
+
135
+ mask = Image.open(mask_path).convert('P')
136
+ mask = np.array(mask)
137
+
138
+ # create the target
139
+ for obj_id in list(obj_id_cat.keys()):
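+ # one mask/box/valid entry is appended per (frame, object) pair, so these lists grow to T * num_objects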
140
+ obj_mask = (mask==int(obj_id)).astype(np.float32) # 0,1 binary
141
+ if (obj_mask > 0).any():
142
+ y1, y2, x1, x2 = self.bounding_box(obj_mask)
143
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
144
+ valid.append(1)
145
+ else: # some frame didn't contain the instance
146
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
147
+ valid.append(0)
148
+ obj_mask = torch.from_numpy(obj_mask)
149
+
150
+ # append
151
+ masks.append(obj_mask)
152
+ boxes.append(box)
153
+
154
+
155
+ # transform
156
+ w, h = img.size
157
+ boxes = torch.stack(boxes, dim=0)
158
+ boxes[:, 0::2].clamp_(min=0, max=w)
159
+ boxes[:, 1::2].clamp_(min=0, max=h)
160
+ masks = torch.stack(masks, dim=0)
161
+ target = {
162
+ 'frames_idx': sample_indx, # [T,]
163
+ 'boxes': boxes, # [T, 4], xyxy
164
+ 'masks': masks, # [T, H, W]
165
+ 'valid': torch.tensor(valid), # [T,]
166
+ 'obj_ids' : list(obj_id_cat.keys()),
167
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
168
+ 'size': torch.as_tensor([int(h), int(w)])
169
+ }
170
+
171
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
172
+ if self._transforms:
173
+ imgs, target = self._transforms(imgs, target)
174
+ imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
175
+ else:
176
+ imgs = np.array(imgs)
177
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
178
+
179
+
180
+ # # FIXME: handle "valid", since some box may be removed due to random crop
181
+ # if torch.any(target['valid'] == 1): # at least one instance
182
+ # instance_check = True
183
+ # else:
184
+ # idx = random.randint(0, self.__len__() - 1)
185
+
186
+ return imgs, target
187
+
188
+
189
+ def make_coco_transforms(image_set, max_size=640):
190
+ normalize = T.Compose([
191
+ T.ToTensor(),
192
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
193
+ ])
194
+
195
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
196
+
197
+ if image_set == 'train':
198
+ return T.Compose([
199
+ T.RandomHorizontalFlip(),
200
+ T.PhotometricDistort(),
201
+ T.RandomSelect(
202
+ T.Compose([
203
+ T.RandomResize(scales, max_size=max_size),
204
+ T.Check(),
205
+ ]),
206
+ T.Compose([
207
+ T.RandomResize([400, 500, 600]),
208
+ T.RandomSizeCrop(384, 600),
209
+ T.RandomResize(scales, max_size=max_size),
210
+ T.Check(),
211
+ ])
212
+ ),
213
+ normalize,
214
+ ])
215
+
216
+ # we do not use the 'val' set since the annotations are inaccessible
217
+ if image_set == 'val':
218
+ return T.Compose([
219
+ T.RandomResize([360], max_size=640),
220
+ normalize,
221
+ ])
222
+
223
+ raise ValueError(f'unknown {image_set}')
224
+
225
+
226
+ def build(image_set, args):
227
+ root = Path(args.ytvos_path)
228
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
229
+ PATHS = {
230
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
231
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
232
+ }
233
+ img_folder, ann_file = PATHS[image_set]
234
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
235
+ # num_frames=args.num_frames, max_skip=args.max_skip)
236
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
237
+ num_frames=args.num_frames, max_skip=args.max_skip)
238
+ return dataset
239
+
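+ # Minimal usage sketch (assuming an `args` namespace providing ytvos_path, masks, num_frames and max_skip):
+ # dataset = build('train', args)
+ # imgs, target = dataset[0] # imgs: [T, 3, H, W]; target holds boxes, masks, valid flags, obj_ids and sizes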
.history/mbench/gpt_ref-ytvos-cy_20250121151513.py ADDED
@@ -0,0 +1,433 @@
1
+ import sys
2
+ from os import path as osp
3
+ sys.path.append(osp.abspath(osp.join(osp.dirname(__file__), '..')))
4
+
5
+ from mbench.ytvos_ref import build as build_ytvos_ref
6
+ import argparse
7
+ import opts
8
+
9
+ import sys
10
+ from pathlib import Path
11
+ import os
12
+ from os import path as osp
13
+ import skimage
14
+ from io import BytesIO
15
+
16
+ import numpy as np
17
+ import pandas as pd
18
+ import regex as re
19
+ import json
20
+
21
+ import cv2
22
+ from PIL import Image, ImageDraw
23
+ import torch
24
+ from torchvision.transforms import functional as F
25
+
26
+ from skimage import measure # (pip install scikit-image)
27
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
28
+
29
+ import matplotlib.pyplot as plt
30
+ import matplotlib.patches as patches
31
+ from matplotlib.collections import PatchCollection
32
+ from matplotlib.patches import Rectangle
33
+
34
+
35
+ import ipywidgets as widgets
36
+ from IPython.display import display, clear_output
37
+
38
+ from openai import OpenAI
39
+ import base64
40
+
41
+ # Function to encode the image
42
+ def encode_image(image_path):
43
+ with open(image_path, "rb") as image_file:
44
+ return base64.b64encode(image_file.read()).decode("utf-8")
45
+
46
+
47
+ # Captioner
48
+ ytvos_category_valid_list = [
49
+ 'airplane', 'ape', 'bear', 'bike', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
50
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
51
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
52
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
53
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
54
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
55
+ ]
56
+ def getCaption(video_id, json_data):
57
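+ # Per-category pipeline: (0) keep only movable categories, (1) ask GPT whether multiple instances are distinguishable by action, (2) if so, generate a dense action-centric caption for every sampled frame.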
+ # load the video data
58
+ video_data = json_data[video_id]
59
+ frame_names = video_data['frame_names']
60
+ video_path = video_data['video_path']
61
+
62
+ cat_names = set()
63
+ all_captions = dict()
64
+ for obj_id in list(video_data['annotations'][0].keys()):
65
+ cat_names.add(video_data['annotations'][0][obj_id]['category_name'])
66
+
67
+ # cat_names : person, snowboard
68
+ # 1. ask GPT directly whether the category can be the subject of an action
69
+ # 2. keep only the category names we want to handle from the categories provided by ref-youtube-vos
70
+
71
+ for cat_name in list(cat_names) :
72
+ image_paths = [os.path.join(video_path, frame_name + '.jpg') for frame_name in frame_names]
73
+ image_captions = {}
74
+
75
+ captioner = OpenAI()
76
+
77
+ # Step 0: can this category be the subject of an action?
78
+ is_movable = False
79
+ if cat_name in ytvos_category_valid_list :
80
+ is_movable = True
81
+
82
+ # response_check = captioner.chat.completions.create(
83
+ # model="gpt-4o",
84
+ # messages=[
85
+ # {
86
+ # "role": "user",
87
+ # "content": f"""
88
+ # Can a {cat_name} be a subject of distinct actions or movements?
89
+ # For example, if {cat_name} is a person, animal, or vehicle, it is likely an action-capable subject.
90
+ # However, if it is an inanimate object like a snowboard, tree, or book, it cannot independently perform actions.
91
+ # Respond with YES if {cat_name} can perform distinct actions or movements; otherwise, respond with NONE.
92
+ # Answer only YES or NONE.
93
+ # """
94
+ # }
95
+ # ],
96
+ # )
97
+ # response_check_content = response_check.choices[0].message.content.strip().lower()
98
+ # print(f"Movable Check for {cat_name}: {response_check_content}")
99
+
100
+ # if response_check_content == "yes": is_movable = True
101
+
102
+ if not is_movable:
103
+ print(f"Skipping {cat_name}: Determined to be non-movable.")
104
+ continue
105
+
106
+ for i in range(len(image_paths)):
107
+ image_path = image_paths[i]
108
+ frame_name = frame_names[i]
109
+ base64_image = encode_image(image_path)
110
+
111
+ # Step 1: filtering
112
+ print(cat_name, frame_name)
113
+ response1 = captioner.chat.completions.create(
114
+ model="gpt-4o",
115
+ messages=[
116
+ {
117
+ "role": "user",
118
+ "content": [
119
+ {
120
+ "type": "text",
121
+
122
+ "text": f"""Are there multiple {cat_name}s in the image, each performing distinct and recognizable actions?
123
+ Focus only on clear and prominent actions, avoiding minor or ambiguous ones.
124
+ Each action should be unique and clearly associated with a specific object.
125
+
126
+ Respond with YES if:
127
+ - The {cat_name}s are people, animals or vehicles, and their actions are distinct and recognizable.
128
+ - The {cat_name}s involve clear, distinguishable actions performed independently.
129
+
130
+ Respond with NONE if:
131
+ - The {cat_name}s are objects (e.g., snowboard, tree, books) and do not involve direct interaction with a person.
132
+ - Actions are ambiguous, minor, or not clearly visible.
133
+
134
+ If the {cat_name} is 'snowboard' and it is not actively being used or interacted with by a person, output NONE.
135
+ If the {cat_name} is 'person' and their actions are distinct and clear, output YES.
136
+
137
+ Answer only YES or NONE."""
138
+
139
+ },
140
+ {
141
+ "type": "image_url",
142
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
143
+ },
144
+ ],
145
+ }
146
+ ],
147
+ )
148
+ response_content = response1.choices[0].message.content
149
+ should_caption = True if "yes" in response_content.lower() else False
150
+ print(f"are {cat_name}s distinguished by action: {response_content}")
151
+
152
+ # Step 2: generate a dense caption
153
+ if should_caption:
154
+ response2 = captioner.chat.completions.create(
155
+ model="gpt-4o-mini",
156
+ messages=[
157
+ {
158
+ "role": "user",
159
+ "content": [
160
+ {
161
+ "type": "text",
162
+
163
+ "text": f"""
164
+ Generate a detailed action-centric caption describing the actions of the {cat_name}s in the image.
165
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
166
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
167
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
168
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
169
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
170
+ 6. Avoid overly detailed or speculative descriptions such as 'slightly moving its mouth' or 'appears to be anticipating'.
171
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
172
+ 8. Include interactions with objects or other entities when they are prominent and observable.
173
+ 9. If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
174
+ Output only the caption.""",
175
+ },
176
+ {
177
+ "type": "image_url",
178
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
179
+ },
180
+ ],
181
+ }
182
+ ],
183
+ )
184
+
185
+ caption = response2.choices[0].message.content
186
+ print(f"{image_path} - {frame_name}: {caption}")
187
+ else:
188
+ caption = None
189
+
190
+ image_captions[frame_name] = caption
191
+ all_captions[cat_name] = image_captions
192
+
193
+ # final : also prepare valid object ids
194
+ valid_obj_ids = []
195
+ valid_cat_names = list(all_captions.keys())
196
+ for obj_id in list(video_data['annotations'][0].keys()):
197
+ cat = video_data['annotations'][0][obj_id]['category_name']
198
+ if cat in valid_cat_names : valid_obj_ids.append(obj_id)
199
+
200
+ return all_captions, valid_obj_ids
201
+
202
+
203
+ # Referring expression generator and QA filter
204
+ def getRefExp(video_id, frame_name, caption, obj_id, json_data):
205
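+ # Draw a red box around the target object, check visibility on the crop, generate a referring expression from the dense caption, then run two QA checks (describes the boxed object / does not describe other objects).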
+ # draw the object's bounding box on the image
206
+ video_data = json_data[video_id]
207
+ frame_names = video_data['frame_names']
208
+ video_path = video_data['video_path']
209
+ I = skimage.io.imread(osp.join(video_path, frame_name + '.jpg'))
210
+ frame_indx = frame_names.index(frame_name)
211
+ obj_data = video_data['annotations'][frame_indx][obj_id]
212
+
213
+ bbox = obj_data['bbox']
214
+ cat_name = obj_data['category_name']
215
+ valid = obj_data['valid']
216
+
217
+ if valid == 0:
218
+ print("Object not in this frame!")
219
+ return {}
220
+
221
+
222
+ x_min, y_min, x_max, y_max = bbox
223
+ x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)
224
+ cv2.rectangle(I, (x_min, y_min), (x_max, y_max), (225, 0, 0), 2)
225
+ plt.figure()
226
+ plt.imshow(I)
227
+ plt.axis('off')
228
+ plt.show()
229
+
230
+ #cropped object for visibility check
231
+ cropped_I = I[y_min:y_max, x_min:x_max]
232
+ pil_cropped_I = Image.fromarray(cropped_I)
233
+ buff_crop = BytesIO()
234
+ pil_cropped_I.save(buff_crop, format='JPEG')
235
+ base64_cropped_I = base64.b64encode(buff_crop.getvalue()).decode("utf-8")
236
+
237
+ #entire image for referring expression generation
238
+ pil_I = Image.fromarray(I)
239
+ buff = BytesIO()
240
+ pil_I.save(buff, format='JPEG')
241
+ base64_I = base64.b64encode(buff.getvalue()).decode("utf-8")
242
+
243
+ # check whether the object is clearly identifiable
244
+ generator = OpenAI()
245
+ response_check = generator.chat.completions.create(
246
+ model="chatgpt-4o-latest",
247
+ messages=[
248
+ {
249
+ "role": "user",
250
+ "content": [
251
+ {
252
+
253
+ "type": "text",
254
+ "text": f"""Can the {cat_name} in the provided cropped image be clearly identified as belonging to the category {cat_name}?
255
+ Focus on whether the cropped image provides enough visible features (e.g., ears, head shape, fur texture) to confirm that it is a {cat_name}, even if the full body is not visible.
256
+
257
+ Guidelines:
258
+ - If the visible features (like ears, fur texture or head shape) are sufficient to identify the {cat_name}, respond with YES.
259
+ - If multiple {cat_name}s are entangled or overlapping, making it difficult to distinguish one from another, respond with NONE.
260
+ - If the object is clearly visible and identifiable as a {cat_name}, respond with YES.
261
+
262
+ Output only either YES or NONE.
263
+ """
264
+ },
265
+ {
266
+ "type": "image_url",
267
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_cropped_I}"},
268
+ }
269
+ ]
270
+ },
271
+ ]
272
+ )
273
+
274
+ response_check_content = response_check.choices[0].message.content.strip().lower()
275
+ print(f"is object {obj_id} visible: {response_check_content}")
276
+
277
+ if "yes" not in response_check_content:
278
+ print(f"Referring expression not generated: {cat_name} is ambiguous in this frame.")
279
+ return {"ref_exp": "NONE", "caption": caption, "cat_name": cat_name, "file_name": frame_name, "isValid" : False}
280
+
281
+ # generate the referring expression
282
+ # generator = OpenAI()
283
+ response = generator.chat.completions.create(
284
+ model="chatgpt-4o-latest",
285
+ messages=[
286
+ {
287
+ "role": "user",
288
+ "content": [
289
+ {
290
+ "type": "text",
291
+
292
+ "text": f"""Based on the dense caption, create a referring expression for the {cat_name} highlighted with the red box, corresponding to Object ID {obj_id}.
293
+ Guidelines for creating the referring expression:
294
+ 1. The referring expression should describe the prominent actions or poses of the highlighted {cat_name} (Object ID {obj_id}).
295
+ 2. Focus on the behavior or pose described in the caption that is specifically associated with this {cat_name}. Do not include actions or poses of other {cat_name}s.
296
+ 3. If multiple {cat_name}s are present, ensure that the referring expression exclusively describes the {cat_name} corresponding to Object ID {obj_id}.
297
+ 4. Avoid ambiguous or subjective terms. Use specific and clear action verbs to describe the highlighted {cat_name}.
298
+ 5. The referring expression should only describe Object ID {obj_id} and not any other objects or entities.
299
+ 6. Use '{cat_name}' as the noun for the referring expressions.
300
+ Output only the referring expression for the highlighted {cat_name} (Object ID {obj_id}).
301
+
302
+ {caption}
303
+ """
304
+ },
305
+ {
306
+ "type": "image_url",
307
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
308
+ },
309
+ # {
310
+ # "type": "image_url",
311
+ # "image_url": {"url": f"data:image/jpeg;base64,{base64_cropped_I}"},
312
+ # }
313
+ ],
314
+ }
315
+ ],
316
+ )
317
+
318
+ ref_exp = response.choices[0].message.content.strip()
319
+
320
+ #QA filtering
321
+ # QA1: does the expression describe the intended object?
322
+ filter = OpenAI()
323
+ response1 = filter.chat.completions.create(
324
+ model="gpt-4o",
325
+ messages=[
326
+ {
327
+ "role": "user",
328
+ "content": [
329
+ {
330
+ "type": "text",
331
+ "text": f"""Does the given expression describe the {cat_name} highlighted with the red box? If so, only return YES and if not, NO.
332
+ {ref_exp}""",
333
+ },
334
+ {
335
+ "type": "image_url",
336
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
337
+ },
338
+ ],
339
+ }
340
+ ],
341
+ )
342
+
343
+ response1_content = response1.choices[0].message.content
344
+ describesHighlighted = True if "yes" in response1_content.lower() else False
345
+
346
+ # QA2: does the expression avoid describing unintended objects?
347
+ response2 = filter.chat.completions.create(
348
+ model="gpt-4o-mini",
349
+ messages=[
350
+ {
351
+ "role": "user",
352
+ "content": [
353
+ {
354
+ "type": "text",
355
+ "text": f"""Does the given expression describe the person not highlighted with the red box? If so, only return YES and if not, NO.
356
+ {ref_exp}""",
357
+ },
358
+ {
359
+ "type": "image_url",
360
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
361
+ },
362
+ ],
363
+ }
364
+ ],
365
+ )
366
+
367
+ response2_content = response2.choices[0].message.content
368
+ describesNotHighlighted = True if "yes" in response2_content.lower() else False
369
+
370
+ isValid = True if describesHighlighted and not describesNotHighlighted else False
371
+
372
+ print(f"describesHighlighted: {describesHighlighted}, describesNotHighlighted: {describesNotHighlighted}")
373
+
374
+ return {"ref_exp": ref_exp, "caption": caption, "cat_name": cat_name, "file_name": frame_name, "isValid" : isValid}
375
+
376
+
377
+
378
+ if __name__ == '__main__':
379
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
380
+ args = parser.parse_args()
381
+
382
+ #================== load data ===================
383
+ # # full dataset
384
+ # train_dataset = build_ytvos_ref(image_set = 'train', args = args)
385
+
386
+ # # full dataset metadata
387
+ # metas = train_dataset.metas
388
+
389
+ with open('mbench/sampled_frame3.json', 'r') as file:
390
+ data = json.load(file)
391
+
392
+ vid_ids = list(data.keys())
393
+
394
+ all_ref_exps = {}
395
+
396
+ #================== run GPT ===================
397
+ os.environ['OPENAI_API_KEY'] = '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'
398
+
399
+ # for each vid_id in the dataset
400
+ for i in range(1):
401
+ vid_id = vid_ids[i]
402
+
403
+ #==== generate captions ====
404
+ captions, valid_obj_ids = getCaption(vid_id, data)
405
+ cats_in_vid = list(captions.keys())
406
+
407
+ #==== generate referring expressions and run QA filtering ====
408
+ ref_expressions = {}
409
+ # for each category
410
+ for cat_name in cats_in_vid:
411
+ if cat_name not in ref_expressions:
412
+ ref_expressions[cat_name] = {}
413
+
414
+ # for each video frame
415
+ for frame_name in data[vid_id]['frame_names']:
416
+
417
+ if frame_name not in ref_expressions[cat_name]:
418
+ ref_expressions[cat_name][frame_name] = {} # Create frame-level dictionary
419
+
420
+ caption = captions[cat_name][frame_name]
421
+
422
+ if not caption : continue
423
+ else :
424
+ # for each obj id
425
+ for obj_id in valid_obj_ids:
426
+ ref_exp = getRefExp(vid_id, frame_name, caption, obj_id, data)
427
+ ref_expressions[cat_name][frame_name][obj_id] = ref_exp # Store ref_exp
428
+
429
+
430
+ all_ref_exps[vid_id] = ref_expressions
431
+
432
+ with open('mbench/result-cy.json', 'w') as file:
433
+ json.dump(all_ref_exps, file)
.history/mbench/gpt_ref-ytvos-revised_20250121160858.py ADDED
@@ -0,0 +1,428 @@
1
+ import sys
2
+ from os import path as osp
3
+ sys.path.append(osp.abspath(osp.join(osp.dirname(__file__), '..')))
4
+
5
+ from mbench.ytvos_ref import build as build_ytvos_ref
6
+ import argparse
7
+ import opts
8
+
9
+ import sys
10
+ from pathlib import Path
11
+ import os
12
+ from os import path as osp
13
+ import skimage
14
+ from io import BytesIO
15
+
16
+ import numpy as np
17
+ import pandas as pd
18
+ import regex as re
19
+ import json
20
+
21
+ import cv2
22
+ from PIL import Image, ImageDraw
23
+ import torch
24
+ from torchvision.transforms import functional as F
25
+
26
+ from skimage import measure # (pip install scikit-image)
27
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
28
+
29
+ import matplotlib.pyplot as plt
30
+ import matplotlib.patches as patches
31
+ from matplotlib.collections import PatchCollection
32
+ from matplotlib.patches import Rectangle
33
+
34
+
35
+ import ipywidgets as widgets
36
+ from IPython.display import display, clear_output
37
+
38
+ from openai import OpenAI
39
+ import base64
40
+
41
+ # Function to encode the image
42
+ def encode_image(image_path):
43
+ with open(image_path, "rb") as image_file:
44
+ return base64.b64encode(image_file.read()).decode("utf-8")
45
+
46
+ # Captioner
47
+ ytvos_category_valid_list = [
48
+ 'airplane', 'ape', 'bear', 'bike', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
49
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
50
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
51
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
52
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
53
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
54
+ ]
55
+ def getCaption(video_id, json_data):
56
+ # load the video data
57
+ video_data = json_data[video_id]
58
+ frame_names = video_data['frame_names']
59
+ video_path = video_data['video_path']
60
+
61
+ cat_names = set()
62
+ all_captions = dict()
63
+ for obj_id in list(video_data['annotations'][0].keys()):
64
+ cat_names.add(video_data['annotations'][0][obj_id]['category_name'])
65
+
66
+ # cat_names : person, snowboard
67
+ # 1. ask GPT directly whether the category can be the subject of an action
68
+ # 2. keep only the category names we want to handle from the categories provided by ref-youtube-vos
69
+
70
+ for cat_name in list(cat_names) :
71
+ image_paths = [os.path.join(video_path, frame_name + '.jpg') for frame_name in frame_names]
72
+ image_captions = {}
73
+
74
+ captioner = OpenAI()
75
+
76
+ # Step 0: can this category be the subject of an action?
77
+ is_movable = False
78
+ if cat_name in ytvos_category_valid_list :
79
+ is_movable = True
80
+
81
+ # response_check = captioner.chat.completions.create(
82
+ # model="gpt-4o",
83
+ # messages=[
84
+ # {
85
+ # "role": "user",
86
+ # "content": f"""
87
+ # Can a {cat_name} be a subject of distinct actions or movements?
88
+ # For example, if {cat_name} is a person, animal, or vehicle, it is likely an action-capable subject.
89
+ # However, if it is an inanimate object like a snowboard, tree, or book, it cannot independently perform actions.
90
+ # Respond with YES if {cat_name} can perform distinct actions or movements; otherwise, respond with NONE.
91
+ # Answer only YES or NONE.
92
+ # """
93
+ # }
94
+ # ],
95
+ # )
96
+ # response_check_content = response_check.choices[0].message.content.strip().lower()
97
+ # print(f"Movable Check for {cat_name}: {response_check_content}")
98
+
99
+ # if response_check_content == "yes": is_movable = True
100
+
101
+ if not is_movable:
102
+ print(f"Skipping {cat_name}: Determined to be non-movable.")
103
+ continue
104
+
105
+ for i in range(len(image_paths)):
106
+ image_path = image_paths[i]
107
+ frame_name = frame_names[i]
108
+ base64_image = encode_image(image_path)
109
+
110
+ # Step 1: filtering
111
+ #print(f"-----------category name: {cat_name}, frame name: {frame_name}")
112
+ response1 = captioner.chat.completions.create(
113
+ model="chatgpt-4o-latest",
114
+ messages=[
115
+ {
116
+ "role": "user",
117
+ "content": [
118
+ {
119
+ "type": "text",
120
+
121
+ "text": f"""Are there multiple {cat_name}s in the image, each performing distinct and recognizable actions?
122
+ Focus only on clear and prominent actions, avoiding minor or ambiguous ones.
123
+ Each action should be unique and clearly associated with a specific object.
124
+
125
+ Respond with YES if:
126
+ - The {cat_name}s are people, animals or vehicles, and their actions are distinct and recognizable.
127
+ - The {cat_name}s involve clear, distinguishable actions performed independently.
128
+
129
+ Respond with NONE if:
130
+ - The {cat_name}s are objects (e.g., snowboard, tree, books) and do not involve direct interaction with a person.
131
+ - Actions are ambiguous, minor, or not clearly visible.
132
+
133
+ If the {cat_name} is 'snowboard' and it is not actively being used or interacted with by a person, output NONE.
134
+ If the {cat_name} is 'person' and their actions are distinct and clear, output YES.
135
+
136
+ Answer only YES or NONE."""
137
+
138
+ },
139
+ {
140
+ "type": "image_url",
141
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
142
+ },
143
+ ],
144
+ }
145
+ ],
146
+ )
147
+ response_content = response1.choices[0].message.content
148
+ should_caption = True if "yes" in response_content.lower() else False
149
+ #print(f"are {cat_name}s distinguished by action: {response_content}")
150
+
151
+ # Step 2: generate a dense caption
152
+ if should_caption:
153
+ response2 = captioner.chat.completions.create(
154
+ model="chatgpt-4o-latest",
155
+ messages=[
156
+ {
157
+ "role": "user",
158
+ "content": [
159
+ {
160
+ "type": "text",
161
+
162
+ "text": f"""
163
+ Generate a detailed action-centric caption describing the actions of the {cat_name}s in the image.
164
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
165
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
166
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
167
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
168
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
169
+ 6. Avoid overly detailed or speculative descriptions such as 'slightly moving its mouth' or 'appears to be anticipating'.
170
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
171
+ 8. Include interactions with objects or other entities when they are prominent and observable.
172
+ 9. If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
173
+ Output only the caption.""",
174
+ },
175
+ {
176
+ "type": "image_url",
177
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
178
+ },
179
+ ],
180
+ }
181
+ ],
182
+ )
183
+
184
+ caption = response2.choices[0].message.content
185
+ #print(f"{image_path} - {frame_name}: {caption}")
186
+ else:
187
+ caption = None
188
+
189
+ image_captions[frame_name] = caption
190
+ all_captions[cat_name] = image_captions
191
+
192
+ # final : also prepare valid object ids
193
+ valid_obj_ids = []
194
+ valid_cat_names = list(all_captions.keys())
195
+ for obj_id in list(video_data['annotations'][0].keys()):
196
+ cat = video_data['annotations'][0][obj_id]['category_name']
197
+ if cat in valid_cat_names : valid_obj_ids.append(obj_id)
198
+
199
+ return all_captions, valid_obj_ids
200
+
201
+ # Referring expression generator and QA filter
202
+ def getRefExp(video_id, frame_name, caption, obj_id, json_data):
203
+
204
+ # draw the object's bounding box on the image
205
+ video_data = json_data[video_id]
206
+ frame_names = video_data['frame_names']
207
+ video_path = video_data['video_path']
208
+ I = skimage.io.imread(osp.join(video_path, frame_name + '.jpg'))
209
+ frame_indx = frame_names.index(frame_name)
210
+ obj_data = video_data['annotations'][frame_indx][obj_id]
211
+
212
+ bbox = obj_data['bbox']
213
+ cat_name = obj_data['category_name']
214
+ valid = obj_data['valid']
215
+
216
+ if valid == 0:
217
+ print("Object not in this frame!")
218
+ return {}
219
+
220
+
221
+ x_min, y_min, x_max, y_max = bbox
222
+ x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)
223
+ cv2.rectangle(I, (x_min, y_min), (x_max, y_max), (225, 0, 0), 2)
224
+ plt.figure()
225
+ plt.imshow(I)
226
+ plt.axis('off')
227
+ plt.show()
228
+
229
+ #cropped object for visibility check
230
+ cropped_I = I[y_min:y_max, x_min:x_max]
231
+ pil_cropped_I = Image.fromarray(cropped_I)
232
+ buff_crop = BytesIO()
233
+ pil_cropped_I.save(buff_crop, format='JPEG')
234
+ base64_cropped_I = base64.b64encode(buff_crop.getvalue()).decode("utf-8")
235
+
236
+ #entire image for referring expression generation
237
+ pil_I = Image.fromarray(I)
238
+ buff = BytesIO()
239
+ pil_I.save(buff, format='JPEG')
240
+ base64_I = base64.b64encode(buff.getvalue()).decode("utf-8")
241
+
242
+ # check whether the object is clearly identifiable
243
+ generator = OpenAI()
244
+ response_check = generator.chat.completions.create(
245
+ model="chatgpt-4o-latest",
246
+ messages=[
247
+ {
248
+ "role": "user",
249
+ "content": [
250
+ {
251
+
252
+ "type": "text",
253
+ "text": f"""Can the {cat_name} in the provided cropped image be clearly identified as belonging to the category {cat_name}?
254
+ Focus on whether the cropped image provides enough visible features (e.g., ears, head shape, fur texture) to confirm that it is a {cat_name}, even if the full body is not visible.
255
+
256
+ Guidelines:
257
+ - If the visible features (like ears, fur texture or head shape) are sufficient to identify the {cat_name}, respond with YES.
258
+ - If multiple {cat_name}s are entangled or overlapping, making it difficult to distinguish one from another, respond with NONE.
259
+ - If the object is clearly visible and identifiable as a {cat_name}, respond with YES.
260
+
261
+ Output only either YES or NONE.
262
+ """
263
+ },
264
+ {
265
+ "type": "image_url",
266
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_cropped_I}"},
267
+ }
268
+ ]
269
+ },
270
+ ]
271
+ )
272
+
273
+ response_check_content = response_check.choices[0].message.content.strip().lower()
274
+ #print(f"is object {obj_id} visible: {response_check_content}")
275
+
276
+ if "yes" not in response_check_content:
277
+ print(f"Referring expression not generated: {cat_name} is ambiguous in this frame.")
278
+ return {"ref_exp": "NONE", "caption": caption, "cat_name": cat_name, "file_name": frame_name, "isValid" : False}
279
+
280
+ # generate the referring expression
281
+ # generator = OpenAI()
282
+ response = generator.chat.completions.create(
283
+ model="chatgpt-4o-latest",
284
+ messages=[
285
+ {
286
+ "role": "user",
287
+ "content": [
288
+ {
289
+ "type": "text",
290
+
291
+ "text": f"""Based on the dense caption, create a referring expression for the {cat_name} highlighted with the red box, corresponding to Object ID {obj_id}.
292
+ Guidelines for creating the referring expression:
293
+ 1. The referring expression should describe the prominent actions or poses of the highlighted {cat_name} (Object ID {obj_id}).
294
+ 2. Focus on the behavior or pose described in the caption that is specifically associated with this {cat_name}. Do not include actions or poses of other {cat_name}s.
295
+ 3. If multiple {cat_name}s are present, ensure that the referring expression exclusively describes the {cat_name} corresponding to Object ID {obj_id}.
296
+ 4. Avoid ambiguous or subjective terms. Use specific and clear action verbs to describe the highlighted {cat_name}.
297
+ 5. The referring expression should only describe Object ID {obj_id} and not any other objects or entities.
298
+ 6. Use '{cat_name}' as the noun for the referring expressions.
299
+ Output only the referring expression for the highlighted {cat_name} (Object ID {obj_id}).
300
+
301
+ {caption}
302
+ """
303
+ },
304
+ {
305
+ "type": "image_url",
306
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
307
+ },
308
+ # {
309
+ # "type": "image_url",
310
+ # "image_url": {"url": f"data:image/jpeg;base64,{base64_cropped_I}"},
311
+ # }
312
+ ],
313
+ }
314
+ ],
315
+ )
316
+
317
+ ref_exp = response.choices[0].message.content.strip()
318
+
319
+ #QA filtering
320
+ # QA1: does the expression describe the intended object?
321
+ filter = OpenAI()
322
+ response1 = filter.chat.completions.create(
323
+ model="chatgpt-4o-latest",
324
+ messages=[
325
+ {
326
+ "role": "user",
327
+ "content": [
328
+ {
329
+ "type": "text",
330
+ "text": f"""Does the given expression describe the {cat_name} highlighted with the red box? If so, only return YES and if not, NO.
331
+ {ref_exp}""",
332
+ },
333
+ {
334
+ "type": "image_url",
335
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
336
+ },
337
+ ],
338
+ }
339
+ ],
340
+ )
341
+
342
+ response1_content = response1.choices[0].message.content
343
+ describesHighlighted = True if "yes" in response1_content.lower() else False
344
+
345
+ # QA2: does the expression avoid describing unintended objects?
346
+ response2 = filter.chat.completions.create(
347
+ model="chatgpt-4o-latest",
348
+ messages=[
349
+ {
350
+ "role": "user",
351
+ "content": [
352
+ {
353
+ "type": "text",
354
+ "text": f"""Does the given expression describe the person not highlighted with the red box? If so, only return YES and if not, NO.
355
+ {ref_exp}""",
356
+ },
357
+ {
358
+ "type": "image_url",
359
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
360
+ },
361
+ ],
362
+ }
363
+ ],
364
+ )
365
+
366
+ response2_content = response2.choices[0].message.content
367
+ notDescribesNotHighlighted = False if "yes" in response2_content.lower() else True
368
+
369
+ isValid = True if describesHighlighted and notDescribesNotHighlighted else False
370
+
371
+ #print(f"describesHighlighted: {describesHighlighted}, notDescribesNotHighlighted: {notDescribesNotHighlighted}")
372
+ #print(f"ref exp: {ref_exp}")
373
+ #print("")
374
+
375
+ return {"ref_exp": ref_exp, "caption": caption, "cat_name": cat_name, "file_name": frame_name, "isValid" : isValid}
376
+
377
+
378
+ if __name__ == '__main__':
379
+ with open('mbench/sampled_frame3.json', 'r') as file:
380
+ data = json.load(file)
381
+
382
+ vid_ids = list(data.keys())
383
+ all_ref_exps = {}
384
+
385
+ os.environ['OPENAI_API_KEY'] = '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'
386
+
387
+ # for each vid_id in the dataset
388
+ for i in range(50):
389
+ vid_id = vid_ids[i]
390
+
391
+ #==== generate captions ====
392
+ # print("=====================captioner========================")
393
+ captions, valid_obj_ids = getCaption(vid_id, data)
394
+ cats_in_vid = list(captions.keys())
395
+ # print()
396
+
397
+ #==== generate referring expressions and run QA filtering ====
398
+ # print("=====================referring expression generator & QA filter========================")
399
+ ref_expressions = {}
400
+
401
+ # for each category
402
+ for cat_name in cats_in_vid:
403
+ if cat_name not in ref_expressions:
404
+ ref_expressions[cat_name] = {}
405
+ # for each video frame
406
+ for frame_name in data[vid_id]['frame_names']:
407
+ # print(f'--------category: {cat_name}, frame_name: {frame_name}')
408
+
409
+ if frame_name not in ref_expressions[cat_name]:
410
+ ref_expressions[cat_name][frame_name] = {} # Create frame-level dictionary
411
+ caption = captions[cat_name][frame_name]
412
+ if not caption : continue
413
+ else :
414
+ # for each obj id
415
+ for obj_id in valid_obj_ids:
416
+ ref_exp = getRefExp(vid_id, frame_name, caption, obj_id, data)
417
+ ref_expressions[cat_name][frame_name][obj_id] = ref_exp # Store ref_exp
418
+
419
+ all_ref_exps[vid_id] = ref_expressions
420
+
421
+
422
+ with open('mbench/result_revised50.json', 'w') as file:
423
+ json.dump(all_ref_exps, file, indent=4)
424
+
425
+
426
+
427
+
428
+
.history/mbench/gpt_ref-ytvos_20250119070820.py ADDED
@@ -0,0 +1,286 @@
1
+ from datasets import build_dataset
2
+ import argparse
3
+ import opts
4
+
5
+ import sys
6
+ from pathlib import Path
7
+ import os
8
+ from os import path as osp
9
+ import skimage
10
+ from io import BytesIO
11
+
12
+ import numpy as np
13
+ import pandas as pd
14
+ import regex as re
15
+ import json
16
+
17
+ import cv2
18
+ from PIL import Image, ImageDraw
19
+ import torch
20
+ from torchvision.transforms import functional as F
21
+
22
+ from skimage import measure # (pip install scikit-image)
23
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
24
+
25
+ import matplotlib.pyplot as plt
26
+ import matplotlib.patches as patches
27
+ from matplotlib.collections import PatchCollection
28
+ from matplotlib.patches import Rectangle
29
+
30
+
31
+ import ipywidgets as widgets
32
+ from IPython.display import display, clear_output
33
+
34
+ from openai import OpenAI
35
+ import base64
36
+
37
+ os.environ['OPENAI_API_KEY'] = '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'
38
+
39
+ # Function to encode the image
40
+ def encode_image(image_path):
41
+ with open(image_path, "rb") as image_file:
42
+ return base64.b64encode(image_file.read()).decode("utf-8")
43
+
44
+ def getCaption(video_id, json_data):
45
+ # load the video data
46
+ video_data = json_data[video_id]
47
+ frame_names = video_data['frame_names']
48
+ video_path = video_data['video_path']
49
+
50
+ cat_names = set()
51
+ for obj_id in list(video_data['annotations'][0].keys()):
52
+ cat_names.add(video_data['annotations'][0][obj_id]['category_name'])
53
+
54
+ if len(cat_names) == 1:
55
+ cat_name = next(iter(cat_names))
56
+ else:
57
+ print("more than 2 categories")
58
+ return -1
59
+
60
+ image_paths = [os.path.join(video_path, frame_name + '.jpg') for frame_name in frame_names]
61
+ image_captions = {}
62
+
63
+ captioner = OpenAI()
64
+ for i in range(len(image_paths)):
65
+ image_path = image_paths[i]
66
+ frame_name = frame_names[i]
67
+ base64_image = encode_image(image_path)
68
+
69
+ # Step 1: filtering
70
+ response1 = captioner.chat.completions.create(
71
+ model="gpt-4o-mini",
72
+ messages=[
73
+ {
74
+ "role": "user",
75
+ "content": [
76
+ {
77
+ "type": "text",
78
+ "text": f"Are there multiple {cat_name}s that can be distinguished by action? Each action should be prominent and describe the corresponding object only. If so, only output YES. If not, only output None",
79
+ },
80
+ {
81
+ "type": "image_url",
82
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
83
+ },
84
+ ],
85
+ }
86
+ ],
87
+ )
88
+ response_content = response1.choices[0].message.content
89
+ should_caption = True if "yes" in response_content.lower() else False
90
+
91
+ # Step 2: generate a dense caption
92
+ if should_caption:
93
+ response2 = captioner.chat.completions.create(
94
+ model="gpt-4o-mini",
95
+ messages=[
96
+ {
97
+ "role": "user",
98
+ "content": [
99
+ {
100
+ "type": "text",
101
+ "text": f"""
102
+ Describe the image in detail focusing on the {cat_name}s' actions.
103
+ 1. Each action should be prominent, clear and unique, describing the corresponding object only.
104
+ 2. Avoid overly detailed or indeterminate details such as ‘in anticipation’.
105
+ 3. Avoid subjective descriptions such as ‘soft’, ‘controlled’, ‘attentive’, ‘skilled’, ‘casual atmosphere’ and descriptions of the setting.
106
+ 4. Do not include actions that needs to be guessed or suggested.""",
107
+ },
108
+ {
109
+ "type": "image_url",
110
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
111
+ },
112
+ ],
113
+ }
114
+ ],
115
+ )
116
+
117
+ caption = response2.choices[0].message.content
118
+ else:
119
+ caption = None
120
+
121
+ image_captions[frame_name] = caption
122
+ return image_captions
123
+
124
+ def getRefExp(video_id, frame_name, caption, obj_id, json_data):
125
+ # draw the object's bounding box on the image
126
+ video_data = json_data[video_id]
127
+ frame_names = video_data['frame_names']
128
+ video_path = video_data['video_path']
129
+ I = skimage.io.imread(osp.join(video_path, frame_name + '.jpg'))
130
+ frame_indx = frame_names.index(frame_name)
131
+ obj_data = video_data['annotations'][frame_indx][obj_id]
132
+
133
+ bbox = obj_data['bbox']
134
+ cat_name = obj_data['category_name']
135
+ valid = obj_data['valid']
136
+
137
+ if valid == 0:
138
+ print("Object not in this frame!")
139
+ return {}
140
+
141
+
142
+ x_min, y_min, x_max, y_max = bbox
143
+ x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)
144
+ cv2.rectangle(I, (x_min, y_min), (x_max, y_max), (225, 0, 0), 2)
145
+ plt.figure()
146
+ plt.imshow(I)
147
+ plt.axis('off')
148
+ plt.show()
149
+ pil_I = Image.fromarray(I)
150
+ buff = BytesIO()
151
+ pil_I.save(buff, format='JPEG')
152
+ base64_I = base64.b64encode(buff.getvalue()).decode("utf-8")
153
+
154
+ # generate the referring expression
155
+ generator = OpenAI()
156
+ response = generator.chat.completions.create(
157
+ model="gpt-4o-mini",
158
+ messages=[
159
+ {
160
+ "role": "user",
161
+ "content": [
162
+ {
163
+ "type": "text",
164
+ "text": f"""Based on the dense caption, create a referring expression for the {cat_name} highlighted with the red box.
165
+ 1. The referring expression describes the action and does not contain information about appearance or location in the picture.
166
+ 2. Focus only on prominent actions and avoid overly detailed or indeterminate details.
167
+ 3. Avoid subjective terms describing emotion such as ‘in anticipation’, ‘attentively’ or ‘relaxed’ and professional, difficult words.
168
+ 4. The referring expression should only describe the highlighted {cat_name} and not any other.
169
+ 5. Use '{cat_name}' as the noun for the referring expressions.
170
+ Output only the referring expression.
171
+ {caption}""",
172
+ },
173
+ {
174
+ "type": "image_url",
175
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
176
+ },
177
+ ],
178
+ }
179
+ ],
180
+ )
181
+
182
+ ref_exp = response.choices[0].message.content
183
+
184
+ #QA filtering
185
+ # QA1: does the expression describe the intended object?
186
+ filter = OpenAI()
187
+ response1 = filter.chat.completions.create(
188
+ model="gpt-4o-mini",
189
+ messages=[
190
+ {
191
+ "role": "user",
192
+ "content": [
193
+ {
194
+ "type": "text",
195
+ "text": f"""Does the given expression describe the {cat_name} highlighted with the red box? If so, only return YES and if not, NO.
196
+ {ref_exp}""",
197
+ },
198
+ {
199
+ "type": "image_url",
200
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
201
+ },
202
+ ],
203
+ }
204
+ ],
205
+ )
206
+
207
+ response1_content = response1.choices[0].message.content
208
+ describesHighlighted = True if "yes" in response1_content.lower() else False
209
+
210
+ # QA2: does the expression avoid describing unintended objects?
211
+ response2 = filter.chat.completions.create(
212
+ model="gpt-4o-mini",
213
+ messages=[
214
+ {
215
+ "role": "user",
216
+ "content": [
217
+ {
218
+ "type": "text",
219
+ "text": f"""Does the given expression describe the person not highlighted with the red box? If so, only return YES and if not, NO.
220
+ {ref_exp}""",
221
+ },
222
+ {
223
+ "type": "image_url",
224
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_I}"},
225
+ },
226
+ ],
227
+ }
228
+ ],
229
+ )
230
+
231
+ response2_content = response2.choices[0].message.content
232
+ describesNotHighlighted = True if "yes" in response2_content.lower() else False
233
+
234
+ isValid = True if describesHighlighted and not describesNotHighlighted else False
235
+
236
+ print(f"describesHighlighted: {describesHighlighted}, describesNotHighlighted: {describesNotHighlighted}")
237
+
238
+ return {"ref_exp": ref_exp, "caption": caption, "cat_name": cat_name, "file_name": frame_name, "isValid" : isValid}
239
+
240
+ def createRefExp(video_id, json_data):
241
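+ # For one video: caption every frame once, then generate a QA-filtered referring expression for each object in each captioned frame.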
+ video_data = json_data[video_id]
242
+ obj_ids = list(video_data['annotations'][0].keys())
243
+ frame_names = video_data['frame_names']
244
+
245
+ captions_per_frame = getCaption(video_id, json_data)
246
+
247
+ if captions_per_frame == -1:
248
+ print("There are more than 2 cateories")
249
+ return
250
+
251
+
252
+ video_ref_exps = {}
253
+
254
+ for frame_name in frame_names:
255
+ frame_caption = captions_per_frame[frame_name]
256
+
257
+ if frame_caption == None:
258
+ video_ref_exps[frame_name] = None
259
+
260
+ else:
261
+ frame_ref_exps = {}
262
+ for obj_id in obj_ids:
263
+ exp_per_obj = getRefExp(video_id, frame_name, frame_caption, obj_id, json_data)
264
+ frame_ref_exps[obj_id] = exp_per_obj
265
+ video_ref_exps[frame_name] = frame_ref_exps
266
+
267
+ return video_ref_exps
268
+
269
+ if __name__ == '__main__':
270
+ with open('mbench/sampled_frame3.json', 'r') as file:
271
+ data = json.load(file)
272
+
273
+ videos = set()
274
+ with open('make_ref-ytvos/selected_frames.jsonl', 'r') as file:
275
+ manual_select = list(file)
276
+ for frame in manual_select:
277
+ result = json.loads(frame)
278
+ videos.add(result['video'])
279
+ videos = list(videos)
280
+
281
+
282
+ all_video_refs = {}
283
+ for i in range(10):
284
+ video_id = videos[i]
285
+ video_ref = createRefExp(video_id, data)
286
+ all_video_refs[video_id] = video_ref
.history/mbench/gpt_ref-ytvos_numbered_cy_20250130183936.py ADDED
@@ -0,0 +1,199 @@
1
+ import os
2
+ import sys
3
+ from os import path as osp
4
+ from io import BytesIO
5
+
6
+ from mbench.ytvos_ref import build as build_ytvos_ref
7
+ import argparse
8
+ import opts
9
+
10
+ import sys
11
+ from pathlib import Path
12
+ import os
13
+ from os import path as osp
14
+ import skimage
15
+ from io import BytesIO
16
+
17
+ import numpy as np
18
+ import pandas as pd
19
+ import regex as re
20
+ import json
21
+
22
+ import cv2
23
+ from PIL import Image, ImageDraw
24
+ import torch
25
+ from torchvision.transforms import functional as F
26
+
27
+ from skimage import measure # (pip install scikit-image)
28
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
29
+
30
+ import matplotlib.pyplot as plt
31
+ import matplotlib.patches as patches
32
+ from matplotlib.collections import PatchCollection
33
+ from matplotlib.patches import Rectangle
34
+ import textwrap
35
+
36
+
37
+ import ipywidgets as widgets
38
+ from IPython.display import display, clear_output
39
+
40
+ from openai import OpenAI
41
+ import base64
42
+
43
+ def number_objects_and_encode(idx, color_mask=False):
44
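+ # For every category in the video, overlay numbered object IDs (contours or translucent masks) on each sampled frame; return base64-encoded JPEGs, per-frame instance counts, and contour-only frames.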
+ encoded_frames = {}
45
+ contoured_frames = {} # New dictionary for original images
46
+ vid_cat_cnts = {}
47
+
48
+ vid_meta = metas[idx]
49
+ vid_data = train_dataset[idx]
50
+ vid_id = vid_meta['video']
51
+ frame_indx = vid_meta['sample_indx']
52
+ cat_names = set(vid_meta['obj_id_cat'].values())
53
+ imgs = vid_data[0]
54
+
55
+ for cat in cat_names:
56
+ cat_frames = []
57
+ contour_frames = []
58
+ frame_cat_cnts = {}
59
+
60
+ for i in range(imgs.size(0)):
61
+ frame_name = frame_indx[i]
62
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
63
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
64
+
65
+ frame_data = vid_data[2][frame_name]
66
+ obj_ids = list(frame_data.keys())
67
+
68
+ cat_cnt = 0
69
+
70
+ for j in range(len(obj_ids)):
71
+ obj_id = obj_ids[j]
72
+ obj_data = frame_data[obj_id]
73
+ obj_bbox = obj_data['bbox']
74
+ obj_valid = obj_data['valid']
75
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
76
+ obj_cat = obj_data['category_name']
77
+
78
+ if obj_cat == cat and obj_valid:
79
+ cat_cnt += 1
80
+
81
+ if color_mask == False:
82
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
83
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
84
+ for contour in contours:  # iterate contours without shadowing the outer frame index
85
+ # compute the contour center
86
+ moments = cv2.moments(contour)
87
+ if moments["m00"] != 0: # centroid can be computed
88
+ cx = int(moments["m10"] / moments["m00"])
89
+ cy = int(moments["m01"] / moments["m00"])
90
+ else:
91
+ cx, cy = contour[0][0] # fall back to a contour point when the centroid cannot be computed
92
+
93
+ # text background (draw a black box)
94
+ font = cv2.FONT_HERSHEY_SIMPLEX
95
+ text = obj_id
96
+ text_size = cv2.getTextSize(text, font, 1, 2)[0]
97
+ text_w, text_h = text_size
98
+
99
+ # draw the text background (black)
100
+ cv2.rectangle(frame, (cx - text_w // 2 - 5, cy - text_h // 2 - 5),
101
+ (cx + text_w // 2 + 5, cy + text_h // 2 + 5), (0, 0, 0), -1)
102
+
103
+ # draw the text (white)
104
+ cv2.putText(frame, text, (cx - text_w // 2, cy + text_h // 2),
105
+ font, 1, (255, 255, 255), 2)
106
+
107
+ else:
108
+ alpha = 0.08
109
+
110
+ colored_obj_mask = np.zeros_like(frame)
111
+ colored_obj_mask[obj_mask == 1] = colors[j]
112
+ frame[obj_mask == 1] = (
113
+ (1 - alpha) * frame[obj_mask == 1]
114
+ + alpha * colored_obj_mask[obj_mask == 1]
115
+ )
116
+
117
+
118
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
119
+ cv2.drawContours(frame, contours, -1, colors[j], 2)
120
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
121
+
122
+
123
+
124
+ if len(contours) > 0:
125
+ largest_contour = max(contours, key=cv2.contourArea)
126
+ M = cv2.moments(largest_contour)
127
+ if M["m00"] != 0:
128
+ center_x = int(M["m10"] / M["m00"])
129
+ center_y = int(M["m01"] / M["m00"])
130
+ else:
131
+ center_x, center_y = 0, 0
132
+
133
+ font = cv2.FONT_HERSHEY_SIMPLEX
134
+ text = obj_id
135
+
136
+ font_scale = 0.9
137
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
138
+ text_x = center_x - text_size[0] // 1 # horizontal center of the text
139
+ text_y = center_y
140
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
141
+
142
+ # compute the text background rectangle coordinates
143
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left of the background rectangle
144
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
145
+ rect_end = (text_x + text_size[0] + 5, text_y)
146
+
147
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
148
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
149
+
150
+ # plt.figure(figsize=(12, 8))
151
+ # plt.imshow(frame)
152
+ # plt.title(f"frame {frame_name}")
153
+ # plt.tight_layout()
154
+ # plt.axis('off')
155
+ # plt.show()
156
+
157
+ buffer = BytesIO()
158
+ frame = Image.fromarray(frame)
159
+ frame.save(buffer, format='jpeg')
160
+ buffer.seek(0)
161
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
162
+ frame_cat_cnts[frame_name] = cat_cnt
163
+
164
+ buffer.seek(0) # Reuse buffer instead of creating a new one
165
+ buffer.truncate()
166
+ frame_for_contour = Image.fromarray(frame_for_contour)
167
+ frame_for_contour.save(buffer, format='jpeg')
168
+ buffer.seek(0)
169
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
170
+
171
+ encoded_frames[cat] = cat_frames
172
+ contoured_frames[cat] = contour_frames
173
+ vid_cat_cnts[cat] = frame_cat_cnts
174
+
175
+ return encoded_frames, vid_cat_cnts, contoured_frames
176
+
177
+ if __name__ == '__main__':
178
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
179
+ args = parser.parse_args()
180
+
181
+ #================== load data ===================
182
+ # full dataset
183
+ train_dataset = build_ytvos_ref(image_set = 'train', args = args)
184
+
185
+ # full dataset metadata
186
+ metas = train_dataset.metas
187
+
188
+ # 8 candidate colors (RGB)
189
+ colors = [
190
+ (255, 0, 0), # Red
191
+ (0, 255, 0), # Green
192
+ (0, 0, 255), # Blue
193
+ (255, 255, 0), # Yellow
194
+ (255, 0, 255), # Magenta
195
+ (0, 255, 255), # Cyan
196
+ (128, 0, 128), # Purple
197
+ (255, 165, 0) # Orange
198
+ ]
199
+
.history/mbench/gpt_ref-ytvos_numbered_cy_20250130190533.py ADDED
@@ -0,0 +1,429 @@
1
+ import os
2
+
3
+ import sys
4
+ from os import path as osp
5
+ from io import BytesIO
6
+
7
+ from ytvos_ref import build as build_ytvos_ref
8
+ import argparse
9
+ import opts
10
+
11
+ import sys
12
+ from pathlib import Path
13
+ import os
14
+ from os import path as osp
15
+ import skimage
16
+ from io import BytesIO
17
+
18
+ import numpy as np
19
+ import pandas as pd
20
+ import regex as re
21
+ import json
22
+
23
+ import cv2
24
+ from PIL import Image, ImageDraw
25
+ import torch
26
+ from torchvision.transforms import functional as F
27
+
28
+ from skimage import measure # (pip install scikit-image)
29
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
30
+
31
+ import matplotlib.pyplot as plt
32
+ import matplotlib.patches as patches
33
+ from matplotlib.collections import PatchCollection
34
+ from matplotlib.patches import Rectangle
35
+ import textwrap
36
+
37
+
38
+ import ipywidgets as widgets
39
+ from IPython.display import display, clear_output
40
+
41
+ from openai import OpenAI
42
+ import base64
43
+ import json
44
+
45
+ def number_objects_and_encode(idx, color_mask=False):
46
+ encoded_frames = {}
47
+ contoured_frames = {} # New dictionary for original images
48
+ vid_cat_cnts = {}
49
+
50
+ vid_meta = metas[idx]
51
+ vid_data = train_dataset[idx]
52
+ vid_id = vid_meta['video']
53
+ frame_indx = vid_meta['sample_indx']
54
+ cat_names = set(vid_meta['obj_id_cat'].values())
55
+ imgs = vid_data[0]
56
+
57
+ for cat in cat_names:
58
+ cat_frames = []
59
+ contour_frames = []
60
+ frame_cat_cnts = {}
61
+
62
+ for i in range(imgs.size(0)):
63
+ frame_name = frame_indx[i]
64
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
65
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
66
+
67
+ frame_data = vid_data[2][frame_name]
68
+ obj_ids = list(frame_data.keys())
69
+
70
+ cat_cnt = 0
71
+
72
+ for j in range(len(obj_ids)):
73
+ obj_id = obj_ids[j]
74
+ obj_data = frame_data[obj_id]
75
+ obj_bbox = obj_data['bbox']
76
+ obj_valid = obj_data['valid']
77
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
78
+ obj_cat = obj_data['category_name']
79
+
80
+ if obj_cat == cat and obj_valid:
81
+ cat_cnt += 1
82
+
83
+ if color_mask == False:
84
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
85
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
86
+ for contour in contours:  # iterate contours without shadowing the outer frame index
87
+ # compute the contour center
88
+ moments = cv2.moments(contour)
89
+ if moments["m00"] != 0: # centroid can be computed
90
+ cx = int(moments["m10"] / moments["m00"])
91
+ cy = int(moments["m01"] / moments["m00"])
92
+ else:
93
+ cx, cy = contour[0][0] # fall back to a contour point when the centroid cannot be computed
94
+
95
+ # text background (draw a black box)
96
+ font = cv2.FONT_HERSHEY_SIMPLEX
97
+ text = obj_id
98
+ text_size = cv2.getTextSize(text, font, 1, 2)[0]
99
+ text_w, text_h = text_size
100
+
101
+ # draw the text background (black)
102
+ cv2.rectangle(frame, (cx - text_w // 2 - 5, cy - text_h // 2 - 5),
103
+ (cx + text_w // 2 + 5, cy + text_h // 2 + 5), (0, 0, 0), -1)
104
+
105
+ # 텍스트 그리기 (흰색 텍스트)
106
+ cv2.putText(frame, text, (cx - text_w // 2, cy + text_h // 2),
107
+ font, 1, (255, 255, 255), 2)
108
+
109
+ else:
110
+ alpha = 0.08
111
+
112
+ colored_obj_mask = np.zeros_like(frame)
113
+ colored_obj_mask[obj_mask == 1] = colors[j]
114
+ frame[obj_mask == 1] = (
115
+ (1 - alpha) * frame[obj_mask == 1]
116
+ + alpha * colored_obj_mask[obj_mask == 1]
117
+ )
118
+
119
+
120
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
121
+ cv2.drawContours(frame, contours, -1, colors[j], 2)
122
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
123
+
124
+
125
+
126
+ if len(contours) > 0:
127
+ largest_contour = max(contours, key=cv2.contourArea)
128
+ M = cv2.moments(largest_contour)
129
+ if M["m00"] != 0:
130
+ center_x = int(M["m10"] / M["m00"])
131
+ center_y = int(M["m01"] / M["m00"])
132
+ else:
133
+ center_x, center_y = 0, 0
134
+
135
+ font = cv2.FONT_HERSHEY_SIMPLEX
136
+ text = obj_id
137
+
138
+ font_scale = 0.9
139
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
140
+ text_x = center_x - text_size[0] // 1 # 텍스트의 가로 중심
141
+ text_y = center_y
142
+ # text_y = center_y + text_size[1] // 2 # 텍스트의 세로 중심
143
+
144
+ # 텍스트 배경 사각형 좌표 계산
145
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # 배경 사각형 좌상단
146
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
147
+ rect_end = (text_x + text_size[0] + 5, text_y)
148
+
149
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
150
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
151
+
152
+ # plt.figure(figsize=(12, 8))
153
+ # plt.imshow(frame)
154
+ # plt.title(f"frame {frame_name}")
155
+ # plt.tight_layout()
156
+ # plt.axis('off')
157
+ # plt.show()
158
+
159
+ buffer = BytesIO()
160
+ frame = Image.fromarray(frame)
161
+ frame.save(buffer, format='jpeg')
162
+ buffer.seek(0)
163
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
164
+ frame_cat_cnts[frame_name] = cat_cnt
165
+
166
+ buffer.seek(0) # Reuse buffer instead of creating a new one
167
+ buffer.truncate()
168
+ frame_for_contour = Image.fromarray(frame_for_contour)
169
+ frame_for_contour.save(buffer, format='jpeg')
170
+ buffer.seek(0)
171
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
172
+
173
+ encoded_frames[cat] = cat_frames
174
+ contoured_frames[cat] = contour_frames
175
+ vid_cat_cnts[cat] = frame_cat_cnts
176
+
177
+ return encoded_frames, vid_cat_cnts, contoured_frames
178
+
179
+
180
+ def getCaption(idx, color_mask=True):
181
+ vid_meta = metas[idx]
182
+ vid_data = train_dataset[idx]
183
+ vid_id = vid_meta['video']
184
+ print(f"vid id: {vid_id}\n")
185
+
186
+ frame_indx = vid_meta['sample_indx'] # e.g. [4, 7, 9, 16]
187
+ cat_names = set(vid_meta['obj_id_cat'].values()) # e.g. {"person", "elephant", ...}
188
+ all_captions = dict()
189
+
190
+ base64_frames, vid_cat_cnts, contoured_frames = number_objects_and_encode(idx, color_mask)
191
+ marked = "mask with boundary" if color_mask else "boundary"
192
+
193
+ for cat_name in list(cat_names) :
194
+
195
+ is_movable = False
196
+ if cat_name in ytvos_category_valid_list :
197
+ is_movable = True
198
+
199
+ if not is_movable:
200
+ print(f"Skipping {cat_name}: Determined to be non-movable.", end='\n\n')
201
+
202
+
203
+ image_captions = {}
204
+ captioner = OpenAI()
205
+ cat_base64_frames = base64_frames[cat_name]
206
+ cont_base64_frames = contoured_frames[cat_name]
207
+
208
+ for i in range(len(cat_base64_frames)):
209
+ frame_name = frame_indx[i]
210
+ cont_base64_image = cont_base64_frames[i]
211
+ base64_image = cat_base64_frames[i]
212
+ should_filter = False
213
+ frame_cat_cnts = vid_cat_cnts[cat_name][frame_name]
214
+
215
+ if frame_cat_cnts >= 2:
216
+ should_filter = True
217
+ else:
218
+ print(f"Skipping {cat_name}: There is single or no object.", end='\n\n')
219
+
220
+ if is_movable and should_filter:
221
+ #1단계: 필터링
222
+ print(f"-----------category name: {cat_name}, frame name: {frame_name}")
223
+ caption_filter_text = f"""
224
+ You are a visual assistant analyzing a single frame from a video.
225
+ In this frame, I have labeled {frame_cat_cnts} {cat_name}(s), each with a bright numeric ID at its center and a visible marker.
226
+
227
+ Are {cat_name}s in the image performing all different and recognizable actions or postures?
228
+ Consider differences in body pose (standing, sitting, holding hands up, grabbing object, facing towards, walking...), motion cues (inferred from the momentary stance or position),
229
+ facial expressions, and any notable interactions with objects or other {cat_name}s or people.
230
+
231
+ Only focus on obvious, prominent actions that can be reliably identified from this single frame.
232
+
233
+ - Respond with "YES" if:
234
+ 1) Most of {cat_name}s exhibit clearly different, unique actions or poses.
235
+ 2) You can see visible significant differences in action and posture, that an observer can identify at a glance.
236
+ 3) Each action is unambiguously recognizable and distinct.
237
+
238
+ - Respond with "NONE" if:
239
+ 1) The actions or pose are not clearly differentiable or too similar.
240
+ 2) They show no noticeable action beyond standing or minor movements.
241
+
242
+ Answer strictly with either "YES" or "NONE".
243
+ """
244
+
245
+
246
+ response1 = captioner.chat.completions.create(
247
+ model="chatgpt-4o-latest",
248
+ messages=[
249
+ {
250
+ "role": "user",
251
+ "content": [
252
+ {
253
+ "type": "text",
254
+ "text": caption_filter_text,
255
+ },
256
+ {
257
+ "type": "image_url",
258
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
259
+ }
260
+ ],
261
+ }
262
+ ],
263
+ )
264
+ response_content = response1.choices[0].message.content
265
+ should_caption = True if "yes" in response_content.lower() else False
266
+ print(f"are {cat_name}s distinguished by action: {response_content}", end='\n\n')
267
+
268
+ else:
269
+ should_caption = False
270
+
271
+ #2단계: dense caption 만들기
272
+ dense_caption_prompt_1 = f"""You are a visual assistant that can analyze a single frame of a video and create referring expressions for each object.
273
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
274
+ I want to use your expressions to create a action-centric referring expression dataset.
275
+ Therefore, your expressions for these {cat_name}s should describe unique action of each object.
276
+
277
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
278
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
279
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
280
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
281
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
282
+ 6. Avoid overly detailed or speculative descriptions such as 'slightly moving its mouth' or 'appears to be anticipating'.
283
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
284
+ 8. Include interactions with objects or other entities when they are prominent and observable.
285
+ 9. If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
286
+ 10. Do not include descriptions of appearance such as clothes, color, size, shape etc.
287
+ 11. Do not include relative position between objects such as 'the left elephant' because left/right can be ambiguous.
288
+ 12. Do not mention object IDs.
289
+ 13. Use '{cat_name}' as the noun for the referring expressions.
290
+
291
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
292
+ Output referring expressions for each object id.
293
+ """
294
+
295
+ dense_caption_prompt = f"""
296
+ You are a visual assistant analyzing a single frame of a video.
297
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
298
+ I want to use your expressions to create a action-centric referring expression dataset.
299
+ Please describe each {cat_name} using **clearly observable** and **specific** actions.
300
+
301
+ ## Guidelines:
302
+ 1. Focus on visible, prominent actions only (e.g., running, pushing, grasping an object).
303
+ 2. Avoid describing minor or ambiguous actions (e.g., slightly moving a paw).
304
+ 3. Do not include subjective or speculative descriptions (e.g., “it seems excited” or “it might be preparing to jump”).
305
+ 4. Do not use vague expressions like "interacting with something"** or "engaging with another object."
306
+ Instead, specify the interaction in detail (e.g., "grabbing a stick," "pressing a button").
307
+ 5. Use dynamic action verbs (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
308
+ 6. If multiple {cat_name}s appear, ensure each description is detailed enough to differentiate their actions.
309
+ 7. Base your description on the following action definitions:
310
+ - Facial with object manipulation
311
+ - General body movement, body position or pattern
312
+ - Movements when interacting with a specific, named object (e.g., "kicking a ball" instead of "interacting with an object").
313
+ - Body movements in person or animal interaction (e.g., "pushing another person" instead of "engaging with someone").
314
+
315
+ ## Output Format:
316
+ - For each labeled {cat_name}, output one line in the format:
317
+ ID. action-oriented description
318
+
319
+ Example:
320
+ 1. a bear grasping the edge of a wood with its front paws
321
+ 2. the bear pushing another bear, leaning forward
322
+
323
+ **Do not include** appearance details (e.g., color, size, shape) or relative positioning (e.g., “on the left/right”).
324
+ **Do not mention object IDs** in the text of your sentence—just use them as labels for your output lines.
325
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
326
+ For each labeled {cat_name}, output referring expressions for each object id.
327
+ """
328
+ if should_caption:
329
+ response2 = captioner.chat.completions.create(
330
+ model="chatgpt-4o-latest",
331
+ messages=[
332
+ {
333
+ "role": "user",
334
+ "content": [
335
+ {
336
+ "type": "text",
337
+ "text": dense_caption_prompt,
338
+ },
339
+ {
340
+ "type": "image_url",
341
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
342
+ },
343
+ ],
344
+ }
345
+ ],
346
+ )
347
+
348
+ caption = response2.choices[0].message.content
349
+ #print(f"{image_path} - {frame_name}: {caption}")
350
+ else:
351
+ caption = None
352
+
353
+ image_captions[frame_name] = caption
354
+ all_captions[cat_name] = image_captions
355
+
356
+ # final : also prepare valid object ids
357
+ valid_obj_ids = dict()
358
+
359
+ for cat in cat_names:
360
+ if cat in ytvos_category_valid_list:
361
+ obj_id_cat = vid_meta['obj_id_cat']
362
+ valid_cat_ids = []
363
+ for obj_id in list(obj_id_cat.keys()):
364
+ if obj_id_cat[obj_id] == cat:
365
+ valid_cat_ids.append(obj_id)
366
+ valid_obj_ids[cat] = valid_cat_ids
367
+
368
+ return vid_id, all_captions, valid_obj_ids
369
+
370
+
371
+ if __name__ == '__main__':
372
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
373
+ parser.add_argument('--save_caption_path', type=str, default="mbench/numbered_captions.json")
374
+ parser.add_argument('--save_valid_obj_ids_path', type=str, default="mbench/numbered_valid_obj_ids.json")
375
+
376
+ args = parser.parse_args()
377
+
378
+ print(args.save_caption_path, flush=True)
379
+ print(args.save_valid_obj_ids_path, flush=True)
380
+
381
+ #==================데이터 불러오기===================
382
+ # 전체 데이터셋
383
+ train_dataset = build_ytvos_ref(image_set = 'train', args = args)
384
+
385
+ # 전체 데이터셋 메타데이터
386
+ metas = train_dataset.metas
387
+
388
+ # 색상 후보 8개 (RGB 형식)
389
+ colors = [
390
+ (255, 0, 0), # Red
391
+ (0, 255, 0), # Green
392
+ (0, 0, 255), # Blue
393
+ (255, 255, 0), # Yellow
394
+ (255, 0, 255), # Magenta
395
+ (0, 255, 255), # Cyan
396
+ (128, 0, 128), # Purple
397
+ (255, 165, 0) # Orange
398
+ ]
399
+
400
+ ytvos_category_valid_list = [
401
+ 'airplane', 'ape', 'bear', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
402
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
403
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
404
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
405
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
406
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
407
+ ]
408
+
409
+ #==================gpt 돌리기===================
410
+ os.environ['OPENAI_API_KEY'] = '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'
411
+
412
+ result_captions = {}
413
+ result_valid_obj_ids = {}
414
+
415
+ for i in range(370):
416
+ vid_id, all_captions, valid_obj_ids = getCaption(i, True)
417
+
418
+ if vid_id not in result_captions:
419
+ result_captions[vid_id] = all_captions
420
+ if vid_id not in result_valid_obj_ids:
421
+ result_valid_obj_ids[vid_id] = valid_obj_ids
422
+
423
+ print("Finished!", flush=True)
424
+
425
+ with open(args.save_caption_path, "w") as file:
426
+ json.dump(result_captions, file, indent=4)
427
+
428
+ with open(args.save_valid_obj_ids_path, "w") as file:
429
+ json.dump(result_valid_obj_ids, file, indent=4)
.history/mbench/gpt_ref-ytvos_numbered_cy_20250130190813.py ADDED
@@ -0,0 +1,427 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+
5
+ from os import path as osp
6
+ from io import BytesIO
7
+
8
+ from mbench.ytvos_ref import build as build_ytvos_ref
9
+ import argparse
10
+ import opts
11
+
12
+ import sys
13
+ from pathlib import Path
14
+ import os
15
+ from os import path as osp
16
+ import skimage
17
+ from io import BytesIO
18
+
19
+ import numpy as np
20
+ import pandas as pd
21
+ import regex as re
22
+ import json
23
+
24
+ import cv2
25
+ from PIL import Image, ImageDraw
26
+ import torch
27
+ from torchvision.transforms import functional as F
28
+
29
+ from skimage import measure # (pip install scikit-image)
30
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
31
+
32
+ import matplotlib.pyplot as plt
33
+ import matplotlib.patches as patches
34
+ from matplotlib.collections import PatchCollection
35
+ from matplotlib.patches import Rectangle
36
+ import textwrap
37
+
38
+
39
+ import ipywidgets as widgets
40
+ from IPython.display import display, clear_output
41
+
42
+ from openai import OpenAI
43
+ import base64
44
+ import json
45
+
46
+ def number_objects_and_encode(idx, color_mask=False):
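+ # Annotate every sampled frame of video `idx`: draw each valid object's contour (or translucent mask) and a numbered ID label,
+ # then return, per category, the base64-encoded annotated frames, the per-frame object counts, and contour-only frames.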
47
+ encoded_frames = {}
48
+ contoured_frames = {} # New dictionary for original images
49
+ vid_cat_cnts = {}
50
+
51
+ vid_meta = metas[idx]
52
+ vid_data = train_dataset[idx]
53
+ vid_id = vid_meta['video']
54
+ frame_indx = vid_meta['sample_indx']
55
+ cat_names = set(vid_meta['obj_id_cat'].values())
56
+ imgs = vid_data[0]
57
+
58
+ for cat in cat_names:
59
+ cat_frames = []
60
+ contour_frames = []
61
+ frame_cat_cnts = {}
62
+
63
+ for i in range(imgs.size(0)):
64
+ frame_name = frame_indx[i]
65
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
66
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
67
+
68
+ frame_data = vid_data[2][frame_name]
69
+ obj_ids = list(frame_data.keys())
70
+
71
+ cat_cnt = 0
72
+
73
+ for j in range(len(obj_ids)):
74
+ obj_id = obj_ids[j]
75
+ obj_data = frame_data[obj_id]
76
+ obj_bbox = obj_data['bbox']
77
+ obj_valid = obj_data['valid']
78
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
79
+ obj_cat = obj_data['category_name']
80
+
81
+ if obj_cat == cat and obj_valid:
82
+ cat_cnt += 1
83
+
84
+ if color_mask == False:
85
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
86
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
87
+ for i, contour in enumerate(contours):
88
+ # compute the contour centroid
89
+ moments = cv2.moments(contour)
90
+ if moments["m00"] != 0: # check whether the centroid can be computed
91
+ cx = int(moments["m10"] / moments["m00"])
92
+ cy = int(moments["m01"] / moments["m00"])
93
+ else:
94
+ cx, cy = contour[0][0] # fall back to the first contour point when the centroid cannot be computed
95
+
96
+ # text background (draw a black box behind the label)
97
+ font = cv2.FONT_HERSHEY_SIMPLEX
98
+ text = obj_id
99
+ text_size = cv2.getTextSize(text, font, 1, 2)[0]
100
+ text_w, text_h = text_size
101
+
102
+ # draw the text background (black box)
103
+ cv2.rectangle(frame, (cx - text_w // 2 - 5, cy - text_h // 2 - 5),
104
+ (cx + text_w // 2 + 5, cy + text_h // 2 + 5), (0, 0, 0), -1)
105
+
106
+ # draw the label text (white)
107
+ cv2.putText(frame, text, (cx - text_w // 2, cy + text_h // 2),
108
+ font, 1, (255, 255, 255), 2)
109
+
110
+ else:
111
+ alpha = 0.08
112
+
113
+ colored_obj_mask = np.zeros_like(frame)
114
+ colored_obj_mask[obj_mask == 1] = colors[j]
115
+ frame[obj_mask == 1] = (
116
+ (1 - alpha) * frame[obj_mask == 1]
117
+ + alpha * colored_obj_mask[obj_mask == 1]
118
+ )
119
+
120
+
121
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
122
+ cv2.drawContours(frame, contours, -1, colors[j], 2)
123
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
124
+
125
+
126
+
127
+ if len(contours) > 0:
128
+ largest_contour = max(contours, key=cv2.contourArea)
129
+ M = cv2.moments(largest_contour)
130
+ if M["m00"] != 0:
131
+ center_x = int(M["m10"] / M["m00"])
132
+ center_y = int(M["m01"] / M["m00"])
133
+ else:
134
+ center_x, center_y = 0, 0
135
+
136
+ font = cv2.FONT_HERSHEY_SIMPLEX
137
+ text = obj_id
138
+
139
+ font_scale = 0.9
140
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
141
+ text_x = center_x - text_size[0] // 1 # horizontal anchor of the text
142
+ text_y = center_y
143
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
144
+
145
+ # compute the background rectangle for the text
146
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background rectangle
147
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
148
+ rect_end = (text_x + text_size[0] + 5, text_y)
149
+
150
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
151
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
152
+
153
+ # plt.figure(figsize=(12, 8))
154
+ # plt.imshow(frame)
155
+ # plt.title(f"frame {frame_name}")
156
+ # plt.tight_layout()
157
+ # plt.axis('off')
158
+ # plt.show()
159
+
160
+ buffer = BytesIO()
161
+ frame = Image.fromarray(frame)
162
+ frame.save(buffer, format='jpeg')
163
+ buffer.seek(0)
164
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
165
+ frame_cat_cnts[frame_name] = cat_cnt
166
+
167
+ buffer.seek(0) # Reuse buffer instead of creating a new one
168
+ buffer.truncate()
169
+ frame_for_contour = Image.fromarray(frame_for_contour)
170
+ frame_for_contour.save(buffer, format='jpeg')
171
+ buffer.seek(0)
172
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
173
+
174
+ encoded_frames[cat] = cat_frames
175
+ contoured_frames[cat] = contour_frames
176
+ vid_cat_cnts[cat] = frame_cat_cnts
177
+
178
+ return encoded_frames, vid_cat_cnts, contoured_frames
179
+
180
+
181
+ def getCaption(idx, color_mask=True):
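+ # For video `idx`: encode the numbered frames, ask the OpenAI model whether the labeled objects of each category show
+ # clearly distinct actions, and if so request one action-centric referring expression per object ID.
+ # Returns the video id, the captions per category and frame, and the valid object ids per category.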
182
+ vid_meta = metas[idx]
183
+ vid_data = train_dataset[idx]
184
+ vid_id = vid_meta['video']
185
+ print(f"vid id: {vid_id}\n")
186
+
187
+ frame_indx = vid_meta['sample_indx'] # e.g. [4, 7, 9, 16]
188
+ cat_names = set(vid_meta['obj_id_cat'].values()) # e.g. {"person", "elephant", ...}
189
+ all_captions = dict()
190
+
191
+ base64_frames, vid_cat_cnts, contoured_frames = number_objects_and_encode(idx, color_mask)
192
+ marked = "mask with boundary" if color_mask else "boundary"
193
+
194
+ for cat_name in list(cat_names) :
195
+
196
+ is_movable = False
197
+ if cat_name in ytvos_category_valid_list :
198
+ is_movable = True
199
+
200
+ if not is_movable:
201
+ print(f"Skipping {cat_name}: Determined to be non-movable.", end='\n\n')
202
+
203
+
204
+ image_captions = {}
205
+ captioner = OpenAI()
206
+ cat_base64_frames = base64_frames[cat_name]
207
+ cont_base64_frames = contoured_frames[cat_name]
208
+
209
+ for i in range(len(cat_base64_frames)):
210
+ frame_name = frame_indx[i]
211
+ cont_base64_image = cont_base64_frames[i]
212
+ base64_image = cat_base64_frames[i]
213
+ should_filter = False
214
+ frame_cat_cnts = vid_cat_cnts[cat_name][frame_name]
215
+
216
+ if frame_cat_cnts >= 2:
217
+ should_filter = True
218
+ else:
219
+ print(f"Skipping {cat_name}: There is single or no object.", end='\n\n')
220
+
221
+ if is_movable and should_filter:
222
+ # Step 1: filtering
223
+ print(f"-----------category name: {cat_name}, frame name: {frame_name}")
224
+ caption_filter_text = f"""
225
+ You are a visual assistant analyzing a single frame from a video.
226
+ In this frame, I have labeled {frame_cat_cnts} {cat_name}(s), each with a bright numeric ID at its center and a visible marker.
227
+
228
+ Are {cat_name}s in the image performing all different and recognizable actions or postures?
229
+ Consider differences in body pose (standing, sitting, holding hands up, grabbing object, facing towards, walking...), motion cues (inferred from the momentary stance or position),
230
+ facial expressions, and any notable interactions with objects or other {cat_name}s or people.
231
+
232
+ Only focus on obvious, prominent actions that can be reliably identified from this single frame.
233
+
234
+ - Respond with "YES" if:
235
+ 1) Most of {cat_name}s exhibit clearly different, unique actions or poses.
236
+ 2) You can see visible significant differences in action and posture, that an observer can identify at a glance.
237
+ 3) Each action is unambiguously recognizable and distinct.
238
+
239
+ - Respond with "NONE" if:
240
+ 1) The actions or pose are not clearly differentiable or too similar.
241
+ 2) They show no noticeable action beyond standing or minor movements.
242
+
243
+ Answer strictly with either "YES" or "NONE".
244
+ """
245
+
246
+
247
+ response1 = captioner.chat.completions.create(
248
+ model="chatgpt-4o-latest",
249
+ messages=[
250
+ {
251
+ "role": "user",
252
+ "content": [
253
+ {
254
+ "type": "text",
255
+ "text": caption_filter_text,
256
+ },
257
+ {
258
+ "type": "image_url",
259
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
260
+ }
261
+ ],
262
+ }
263
+ ],
264
+ )
265
+ response_content = response1.choices[0].message.content
266
+ should_caption = True if "yes" in response_content.lower() else False
267
+ print(f"are {cat_name}s distinguished by action: {response_content}", end='\n\n')
268
+
269
+ else:
270
+ should_caption = False
271
+
272
+ # Step 2: generate dense captions
273
+ dense_caption_prompt_1 = f"""You are a visual assistant that can analyze a single frame of a video and create referring expressions for each object.
274
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
275
+ I want to use your expressions to create a action-centric referring expression dataset.
276
+ Therefore, your expressions for these {cat_name}s should describe unique action of each object.
277
+
278
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
279
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
280
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
281
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
282
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
283
+ 6. Avoid overly detailed or speculative descriptions such as 'slightly moving its mouth' or 'appears to be anticipating'.
284
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
285
+ 8. Include interactions with objects or other entities when they are prominent and observable.
286
+ 9. If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
287
+ 10. Do not include descriptions of appearance such as clothes, color, size, shape etc.
288
+ 11. Do not include relative position between objects such as 'the left elephant' because left/right can be ambiguous.
289
+ 12. Do not mention object IDs.
290
+ 13. Use '{cat_name}' as the noun for the referring expressions.
291
+
292
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
293
+ Output referring expressions for each object id.
294
+ """
295
+
296
+ dense_caption_prompt = f"""
297
+ You are a visual assistant analyzing a single frame of a video.
298
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
299
+ I want to use your expressions to create a action-centric referring expression dataset.
300
+ Please describe each {cat_name} using **clearly observable** and **specific** actions.
301
+
302
+ ## Guidelines:
303
+ 1. Focus on visible, prominent actions only (e.g., running, pushing, grasping an object).
304
+ 2. Avoid describing minor or ambiguous actions (e.g., slightly moving a paw).
305
+ 3. Do not include subjective or speculative descriptions (e.g., “it seems excited” or “it might be preparing to jump”).
306
+ 4. Do not use vague expressions like "interacting with something"** or "engaging with another object."
307
+ Instead, specify the interaction in detail (e.g., "grabbing a stick," "pressing a button").
308
+ 5. Use dynamic action verbs (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
309
+ 6. If multiple {cat_name}s appear, ensure each description is detailed enough to differentiate their actions.
310
+ 7. Base your description on the following action definitions:
311
+ - Facial with object manipulation
312
+ - General body movement, body position or pattern
313
+ - Movements when interacting with a specific, named object (e.g., "kicking a ball" instead of "interacting with an object").
314
+ - Body movements in person or animal interaction (e.g., "pushing another person" instead of "engaging with someone").
315
+
316
+ ## Output Format:
317
+ - For each labeled {cat_name}, output one line in the format:
318
+ ID. action-oriented description
319
+
320
+ Example:
321
+ 1. a bear grasping the edge of a wood with its front paws
322
+ 2. the bear pushing another bear, leaning forward
323
+
324
+ **Do not include** appearance details (e.g., color, size, shape) or relative positioning (e.g., “on the left/right”).
325
+ **Do not mention object IDs** in the text of your sentence—just use them as labels for your output lines.
326
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
327
+ For each labeled {cat_name}, output referring expressions for each object id.
328
+ """
329
+ if should_caption:
330
+ response2 = captioner.chat.completions.create(
331
+ model="chatgpt-4o-latest",
332
+ messages=[
333
+ {
334
+ "role": "user",
335
+ "content": [
336
+ {
337
+ "type": "text",
338
+ "text": dense_caption_prompt,
339
+ },
340
+ {
341
+ "type": "image_url",
342
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
343
+ },
344
+ ],
345
+ }
346
+ ],
347
+ )
348
+
349
+ caption = response2.choices[0].message.content
350
+ #print(f"{image_path} - {frame_name}: {caption}")
351
+ else:
352
+ caption = None
353
+
354
+ image_captions[frame_name] = caption
355
+ all_captions[cat_name] = image_captions
356
+
357
+ # final : also prepare valid object ids
358
+ valid_obj_ids = dict()
359
+
360
+ for cat in cat_names:
361
+ if cat in ytvos_category_valid_list:
362
+ obj_id_cat = vid_meta['obj_id_cat']
363
+ valid_cat_ids = []
364
+ for obj_id in list(obj_id_cat.keys()):
365
+ if obj_id_cat[obj_id] == cat:
366
+ valid_cat_ids.append(obj_id)
367
+ valid_obj_ids[cat] = valid_cat_ids
368
+
369
+ return vid_id, all_captions, valid_obj_ids
370
+
371
+
372
+ if __name__ == '__main__':
373
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
374
+ parser.add_argument('--save_caption_path', type=str, default="mbench/numbered_captions.json")
375
+ parser.add_argument('--save_valid_obj_ids_path', type=str, default="mbench/numbered_valid_obj_ids.json")
376
+
377
+ args = parser.parse_args()
378
+
379
+ #================== load data ===================
380
+ # full dataset
381
+ train_dataset = build_ytvos_ref(image_set = 'train', args = args)
382
+
383
+ # metadata for the full dataset
384
+ metas = train_dataset.metas
385
+
386
+ # 8 candidate colors (RGB)
387
+ colors = [
388
+ (255, 0, 0), # Red
389
+ (0, 255, 0), # Green
390
+ (0, 0, 255), # Blue
391
+ (255, 255, 0), # Yellow
392
+ (255, 0, 255), # Magenta
393
+ (0, 255, 255), # Cyan
394
+ (128, 0, 128), # Purple
395
+ (255, 165, 0) # Orange
396
+ ]
397
+
398
+ ytvos_category_valid_list = [
399
+ 'airplane', 'ape', 'bear', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
400
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
401
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
402
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
403
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
404
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
405
+ ]
406
+
407
+ #================== run GPT ===================
408
+ os.environ['OPENAI_API_KEY'] = 'YOUR_OPENAI_API_KEY' # do not commit real API keys; set OPENAI_API_KEY in the environment instead
409
+
410
+ result_captions = {}
411
+ result_valid_obj_ids = {}
412
+
413
+ for i in range(370):
414
+ vid_id, all_captions, valid_obj_ids = getCaption(i, True)
415
+
416
+ if vid_id not in result_captions:
417
+ result_captions[vid_id] = all_captions
418
+ if vid_id not in result_valid_obj_ids:
419
+ result_valid_obj_ids[vid_id] = valid_obj_ids
420
+
421
+ print("Finished!", flush=True)
422
+
423
+ with open(args.save_caption_path, "w") as file:
424
+ json.dump(result_captions, file, indent=4)
425
+
426
+ with open(args.save_valid_obj_ids_path, "w") as file:
427
+ json.dump(result_valid_obj_ids, file, indent=4)
.history/mbench/gpt_ref-ytvos_numbered_cy_20250130220417.py ADDED
@@ -0,0 +1,427 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+
5
+ from os import path as osp
6
+ from io import BytesIO
7
+
8
+ from mbench.ytvos_ref import build as build_ytvos_ref
9
+ import argparse
10
+ import opts
11
+
12
+ import sys
13
+ from pathlib import Path
14
+ import os
15
+ from os import path as osp
16
+ import skimage
17
+ from io import BytesIO
18
+
19
+ import numpy as np
20
+ import pandas as pd
21
+ import regex as re
22
+ import json
23
+
24
+ import cv2
25
+ from PIL import Image, ImageDraw
26
+ import torch
27
+ from torchvision.transforms import functional as F
28
+
29
+ from skimage import measure # (pip install scikit-image)
30
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
31
+
32
+ import matplotlib.pyplot as plt
33
+ import matplotlib.patches as patches
34
+ from matplotlib.collections import PatchCollection
35
+ from matplotlib.patches import Rectangle
36
+ import textwrap
37
+
38
+
39
+ import ipywidgets as widgets
40
+ from IPython.display import display, clear_output
41
+
42
+ from openai import OpenAI
43
+ import base64
44
+ import json
45
+
46
+ def number_objects_and_encode(idx, color_mask=False):
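+ # Annotate every sampled frame of video `idx`: draw each valid object's contour (or translucent mask) and a numbered ID label,
+ # then return, per category, the base64-encoded annotated frames, the per-frame object counts, and contour-only frames.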
47
+ encoded_frames = {}
48
+ contoured_frames = {} # New dictionary for original images
49
+ vid_cat_cnts = {}
50
+
51
+ vid_meta = metas[idx]
52
+ vid_data = train_dataset[idx]
53
+ vid_id = vid_meta['video']
54
+ frame_indx = vid_meta['sample_indx']
55
+ cat_names = set(vid_meta['obj_id_cat'].values())
56
+ imgs = vid_data[0]
57
+
58
+ for cat in cat_names:
59
+ cat_frames = []
60
+ contour_frames = []
61
+ frame_cat_cnts = {}
62
+
63
+ for i in range(imgs.size(0)):
64
+ frame_name = frame_indx[i]
65
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
66
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
67
+
68
+ frame_data = vid_data[2][frame_name]
69
+ obj_ids = list(frame_data.keys())
70
+
71
+ cat_cnt = 0
72
+
73
+ for j in range(len(obj_ids)):
74
+ obj_id = obj_ids[j]
75
+ obj_data = frame_data[obj_id]
76
+ obj_bbox = obj_data['bbox']
77
+ obj_valid = obj_data['valid']
78
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
79
+ obj_cat = obj_data['category_name']
80
+
81
+ if obj_cat == cat and obj_valid:
82
+ cat_cnt += 1
83
+
84
+ if color_mask == False:
85
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
86
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
87
+ for i, contour in enumerate(contours):
88
+ # compute the contour centroid
89
+ moments = cv2.moments(contour)
90
+ if moments["m00"] != 0: # check whether the centroid can be computed
91
+ cx = int(moments["m10"] / moments["m00"])
92
+ cy = int(moments["m01"] / moments["m00"])
93
+ else:
94
+ cx, cy = contour[0][0] # fall back to the first contour point when the centroid cannot be computed
95
+
96
+ # text background (draw a black box behind the label)
97
+ font = cv2.FONT_HERSHEY_SIMPLEX
98
+ text = obj_id
99
+ text_size = cv2.getTextSize(text, font, 1, 2)[0]
100
+ text_w, text_h = text_size
101
+
102
+ # draw the text background (black box)
103
+ cv2.rectangle(frame, (cx - text_w // 2 - 5, cy - text_h // 2 - 5),
104
+ (cx + text_w // 2 + 5, cy + text_h // 2 + 5), (0, 0, 0), -1)
105
+
106
+ # draw the label text (white)
107
+ cv2.putText(frame, text, (cx - text_w // 2, cy + text_h // 2),
108
+ font, 1, (255, 255, 255), 2)
109
+
110
+ else:
111
+ alpha = 0.08
112
+
113
+ colored_obj_mask = np.zeros_like(frame)
114
+ colored_obj_mask[obj_mask == 1] = colors[j]
115
+ frame[obj_mask == 1] = (
116
+ (1 - alpha) * frame[obj_mask == 1]
117
+ + alpha * colored_obj_mask[obj_mask == 1]
118
+ )
119
+
120
+
121
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
122
+ cv2.drawContours(frame, contours, -1, colors[j], 2)
123
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
124
+
125
+
126
+
127
+ if len(contours) > 0:
128
+ largest_contour = max(contours, key=cv2.contourArea)
129
+ M = cv2.moments(largest_contour)
130
+ if M["m00"] != 0:
131
+ center_x = int(M["m10"] / M["m00"])
132
+ center_y = int(M["m01"] / M["m00"])
133
+ else:
134
+ center_x, center_y = 0, 0
135
+
136
+ font = cv2.FONT_HERSHEY_SIMPLEX
137
+ text = obj_id
138
+
139
+ font_scale = 0.9
140
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
141
+ text_x = center_x - text_size[0] // 1 # horizontal anchor of the text
142
+ text_y = center_y
143
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
144
+
145
+ # compute the background rectangle for the text
146
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background rectangle
147
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
148
+ rect_end = (text_x + text_size[0] + 5, text_y)
149
+
150
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
151
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
152
+
153
+ # plt.figure(figsize=(12, 8))
154
+ # plt.imshow(frame)
155
+ # plt.title(f"frame {frame_name}")
156
+ # plt.tight_layout()
157
+ # plt.axis('off')
158
+ # plt.show()
159
+
160
+ buffer = BytesIO()
161
+ frame = Image.fromarray(frame)
162
+ frame.save(buffer, format='jpeg')
163
+ buffer.seek(0)
164
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
165
+ frame_cat_cnts[frame_name] = cat_cnt
166
+
167
+ buffer.seek(0) # Reuse buffer instead of creating a new one
168
+ buffer.truncate()
169
+ frame_for_contour = Image.fromarray(frame_for_contour)
170
+ frame_for_contour.save(buffer, format='jpeg')
171
+ buffer.seek(0)
172
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
173
+
174
+ encoded_frames[cat] = cat_frames
175
+ contoured_frames[cat] = contour_frames
176
+ vid_cat_cnts[cat] = frame_cat_cnts
177
+
178
+ return encoded_frames, vid_cat_cnts, contoured_frames
179
+
180
+
181
+ def getCaption(idx, color_mask=True):
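+ # For video `idx`: encode the numbered frames, ask the OpenAI model whether the labeled objects of each category show
+ # clearly distinct actions, and if so request one action-centric referring expression per object ID.
+ # Returns the video id, the captions per category and frame, and the valid object ids per category.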
182
+ vid_meta = metas[idx]
183
+ vid_data = train_dataset[idx]
184
+ vid_id = vid_meta['video']
185
+ print(f"vid id: {vid_id}\n")
186
+
187
+ frame_indx = vid_meta['sample_indx'] # e.g. [4, 7, 9, 16]
188
+ cat_names = set(vid_meta['obj_id_cat'].values()) # e.g. {"person", "elephant", ...}
189
+ all_captions = dict()
190
+
191
+ base64_frames, vid_cat_cnts, contoured_frames = number_objects_and_encode(idx, color_mask)
192
+ marked = "mask with boundary" if color_mask else "boundary"
193
+
194
+ for cat_name in list(cat_names) :
195
+
196
+ is_movable = False
197
+ if cat_name in ytvos_category_valid_list :
198
+ is_movable = True
199
+
200
+ if not is_movable:
201
+ print(f"Skipping {cat_name}: Determined to be non-movable.", end='\n\n')
202
+
203
+
204
+ image_captions = {}
205
+ captioner = OpenAI()
206
+ cat_base64_frames = base64_frames[cat_name]
207
+ cont_base64_frames = contoured_frames[cat_name]
208
+
209
+ for i in range(len(cat_base64_frames)):
210
+ frame_name = frame_indx[i]
211
+ cont_base64_image = cont_base64_frames[i]
212
+ base64_image = cat_base64_frames[i]
213
+ should_filter = False
214
+ frame_cat_cnts = vid_cat_cnts[cat_name][frame_name]
215
+
216
+ if frame_cat_cnts >= 2:
217
+ should_filter = True
218
+ else:
219
+ print(f"Skipping {cat_name}: There is single or no object.", end='\n\n')
220
+
221
+ if is_movable and should_filter:
222
+ # Step 1: filtering
223
+ print(f"-----------category name: {cat_name}, frame name: {frame_name}")
224
+ caption_filter_text = f"""
225
+ You are a visual assistant analyzing a single frame from a video.
226
+ In this frame, I have labeled {frame_cat_cnts} {cat_name}(s), each with a bright numeric ID at its center and a visible marker.
227
+
228
+ Are {cat_name}s in the image performing all different and recognizable actions or postures?
229
+ Consider differences in body pose (standing, sitting, holding hands up, grabbing object, facing towards, walking...), motion cues (inferred from the momentary stance or position),
230
+ facial expressions, and any notable interactions with objects or other {cat_name}s or people.
231
+
232
+ Only focus on obvious, prominent actions that can be reliably identified from this single frame.
233
+
234
+ - Respond with "YES" if:
235
+ 1) Most of {cat_name}s exhibit clearly different, unique actions or poses.
236
+ 2) You can see visible significant differences in action and posture, that an observer can identify at a glance.
237
+ 3) Each action is unambiguously recognizable and distinct.
238
+
239
+ - Respond with "NONE" if:
240
+ 1) The actions or pose are not clearly differentiable or too similar.
241
+ 2) They show no noticeable action beyond standing or minor movements.
242
+
243
+ Answer strictly with either "YES" or "NONE".
244
+ """
245
+
246
+
247
+ response1 = captioner.chat.completions.create(
248
+ model="gpt-4o-mini",
249
+ messages=[
250
+ {
251
+ "role": "user",
252
+ "content": [
253
+ {
254
+ "type": "text",
255
+ "text": caption_filter_text,
256
+ },
257
+ {
258
+ "type": "image_url",
259
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
260
+ }
261
+ ],
262
+ }
263
+ ],
264
+ )
265
+ response_content = response1.choices[0].message.content
266
+ should_caption = True if "yes" in response_content.lower() else False
267
+ print(f"are {cat_name}s distinguished by action: {response_content}", end='\n\n')
268
+
269
+ else:
270
+ should_caption = False
271
+
272
+ # Step 2: generate dense captions
273
+ dense_caption_prompt_1 = f"""You are a visual assistant that can analyze a single frame of a video and create referring expressions for each object.
274
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
275
+ I want to use your expressions to create a action-centric referring expression dataset.
276
+ Therefore, your expressions for these {cat_name}s should describe unique action of each object.
277
+
278
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
279
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
280
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
281
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
282
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
283
+ 6. Avoid overly detailed or speculative descriptions such as 'slightly moving its mouth' or 'appears to be anticipating'.
284
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
285
+ 8. Include interactions with objects or other entities when they are prominent and observable.
286
+ 9. If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
287
+ 10. Do not include descriptions of appearance such as clothes, color, size, shape etc.
288
+ 11. Do not include relative position between objects such as 'the left elephant' because left/right can be ambiguous.
289
+ 12. Do not mention object IDs.
290
+ 13. Use '{cat_name}' as the noun for the referring expressions.
291
+
292
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
293
+ Output referring expressions for each object id.
294
+ """
295
+
296
+ dense_caption_prompt = f"""
297
+ You are a visual assistant analyzing a single frame of a video.
298
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
299
+ I want to use your expressions to create a action-centric referring expression dataset.
300
+ Please describe each {cat_name} using **clearly observable** and **specific** actions.
301
+
302
+ ## Guidelines:
303
+ 1. Focus on visible, prominent actions only (e.g., running, pushing, grasping an object).
304
+ 2. Avoid describing minor or ambiguous actions (e.g., slightly moving a paw).
305
+ 3. Do not include subjective or speculative descriptions (e.g., “it seems excited” or “it might be preparing to jump”).
306
+ 4. Do not use vague expressions like "interacting with something"** or "engaging with another object."
307
+ Instead, specify the interaction in detail (e.g., "grabbing a stick," "pressing a button").
308
+ 5. Use dynamic action verbs (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
309
+ 6. If multiple {cat_name}s appear, ensure each description is detailed enough to differentiate their actions.
310
+ 7. Base your description on the following action definitions:
311
+ - Facial with object manipulation
312
+ - General body movement, body position or pattern
313
+ - Movements when interacting with a specific, named object (e.g., "kicking a ball" instead of "interacting with an object").
314
+ - Body movements in person or animal interaction (e.g., "pushing another person" instead of "engaging with someone").
315
+
316
+ ## Output Format:
317
+ - For each labeled {cat_name}, output one line in the format:
318
+ ID. action-oriented description
319
+
320
+ Example:
321
+ 1. a bear grasping the edge of a wood with its front paws
322
+ 2. the bear pushing another bear, leaning forward
323
+
324
+ **Do not include** appearance details (e.g., color, size, shape) or relative positioning (e.g., “on the left/right”).
325
+ **Do not mention object IDs** in the text of your sentence—just use them as labels for your output lines.
326
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
327
+ For each labeled {cat_name}, output referring expressions for each object id.
328
+ """
329
+ if should_caption:
330
+ response2 = captioner.chat.completions.create(
331
+ model="gpt-4o-mini",
332
+ messages=[
333
+ {
334
+ "role": "user",
335
+ "content": [
336
+ {
337
+ "type": "text",
338
+ "text": dense_caption_prompt,
339
+ },
340
+ {
341
+ "type": "image_url",
342
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
343
+ },
344
+ ],
345
+ }
346
+ ],
347
+ )
348
+
349
+ caption = response2.choices[0].message.content
350
+ #print(f"{image_path} - {frame_name}: {caption}")
351
+ else:
352
+ caption = None
353
+
354
+ image_captions[frame_name] = caption
355
+ all_captions[cat_name] = image_captions
356
+
357
+ # final : also prepare valid object ids
358
+ valid_obj_ids = dict()
359
+
360
+ for cat in cat_names:
361
+ if cat in ytvos_category_valid_list:
362
+ obj_id_cat = vid_meta['obj_id_cat']
363
+ valid_cat_ids = []
364
+ for obj_id in list(obj_id_cat.keys()):
365
+ if obj_id_cat[obj_id] == cat:
366
+ valid_cat_ids.append(obj_id)
367
+ valid_obj_ids[cat] = valid_cat_ids
368
+
369
+ return vid_id, all_captions, valid_obj_ids
370
+
371
+
372
+ if __name__ == '__main__':
373
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
374
+ parser.add_argument('--save_caption_path', type=str, default="mbench/numbered_captions.json")
375
+ parser.add_argument('--save_valid_obj_ids_path', type=str, default="mbench/numbered_valid_obj_ids.json")
376
+
377
+ args = parser.parse_args()
378
+
379
+ #================== load data ===================
380
+ # full dataset
381
+ train_dataset = build_ytvos_ref(image_set = 'train', args = args)
382
+
383
+ # metadata for the full dataset
384
+ metas = train_dataset.metas
385
+
386
+ # 8 candidate colors (RGB)
387
+ colors = [
388
+ (255, 0, 0), # Red
389
+ (0, 255, 0), # Green
390
+ (0, 0, 255), # Blue
391
+ (255, 255, 0), # Yellow
392
+ (255, 0, 255), # Magenta
393
+ (0, 255, 255), # Cyan
394
+ (128, 0, 128), # Purple
395
+ (255, 165, 0) # Orange
396
+ ]
397
+
398
+ ytvos_category_valid_list = [
399
+ 'airplane', 'ape', 'bear', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
400
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
401
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
402
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
403
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
404
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
405
+ ]
406
+
407
+ #================== run GPT ===================
408
+ os.environ['OPENAI_API_KEY'] = 'YOUR_OPENAI_API_KEY' # do not commit real API keys; set OPENAI_API_KEY in the environment instead
409
+
410
+ result_captions = {}
411
+ result_valid_obj_ids = {}
412
+
413
+ for i in range(370):
414
+ vid_id, all_captions, valid_obj_ids = getCaption(i, True)
415
+
416
+ if vid_id not in result_captions:
417
+ result_captions[vid_id] = all_captions
418
+ if vid_id not in result_valid_obj_ids:
419
+ result_valid_obj_ids[vid_id] = valid_obj_ids
420
+
421
+ print("Finished!", flush=True)
422
+
423
+ with open(args.save_caption_path, "w") as file:
424
+ json.dump(result_captions, file, indent=4)
425
+
426
+ with open(args.save_valid_obj_ids_path, "w") as file:
427
+ json.dump(result_valid_obj_ids, file, indent=4)
.history/mbench/gpt_ref-ytvos_numbered_cy_20250201140559.py ADDED
@@ -0,0 +1,461 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+ import time
5
+
6
+ from os import path as osp
7
+ from io import BytesIO
8
+
9
+ from mbench.ytvos_ref import build as build_ytvos_ref
10
+ import argparse
11
+ import opts
12
+
13
+ import sys
14
+ from pathlib import Path
15
+ import os
16
+ from os import path as osp
17
+ import skimage
18
+ from io import BytesIO
19
+
20
+ import numpy as np
21
+ import pandas as pd
22
+ import regex as re
23
+ import json
24
+
25
+ import cv2
26
+ from PIL import Image, ImageDraw
27
+ import torch
28
+ from torchvision.transforms import functional as F
29
+
30
+ from skimage import measure # (pip install scikit-image)
31
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
32
+
33
+ import matplotlib.pyplot as plt
34
+ import matplotlib.patches as patches
35
+ from matplotlib.collections import PatchCollection
36
+ from matplotlib.patches import Rectangle
37
+ import textwrap
38
+
39
+
40
+ import ipywidgets as widgets
41
+ from IPython.display import display, clear_output
42
+
43
+ from openai import OpenAI
44
+ import base64
45
+ import json
46
+
47
+ def number_objects_and_encode(idx, color_mask=False):
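+ # Annotate every sampled frame of video `idx`: draw each valid object's contour (or translucent mask) and a numbered ID label,
+ # then return, per category, the base64-encoded annotated frames, the per-frame object counts, and contour-only frames.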
48
+ encoded_frames = {}
49
+ contoured_frames = {} # New dictionary for original images
50
+ vid_cat_cnts = {}
51
+
52
+ vid_meta = metas[idx]
53
+ vid_data = train_dataset[idx]
54
+ vid_id = vid_meta['video']
55
+ frame_indx = vid_meta['sample_indx']
56
+ cat_names = set(vid_meta['obj_id_cat'].values())
57
+ imgs = vid_data[0]
58
+
59
+ for cat in cat_names:
60
+ cat_frames = []
61
+ contour_frames = []
62
+ frame_cat_cnts = {}
63
+
64
+ for i in range(imgs.size(0)):
65
+ frame_name = frame_indx[i]
66
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
67
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
68
+
69
+ frame_data = vid_data[2][frame_name]
70
+ obj_ids = list(frame_data.keys())
71
+
72
+ cat_cnt = 0
73
+
74
+ for j in range(len(obj_ids)):
75
+ obj_id = obj_ids[j]
76
+ obj_data = frame_data[obj_id]
77
+ obj_bbox = obj_data['bbox']
78
+ obj_valid = obj_data['valid']
79
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
80
+ obj_cat = obj_data['category_name']
81
+
82
+ if obj_cat == cat and obj_valid:
83
+ cat_cnt += 1
84
+
85
+ if color_mask == False:
86
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
87
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
88
+ for i, contour in enumerate(contours):
89
+ # compute the contour centroid
90
+ moments = cv2.moments(contour)
91
+ if moments["m00"] != 0: # check whether the centroid can be computed
92
+ cx = int(moments["m10"] / moments["m00"])
93
+ cy = int(moments["m01"] / moments["m00"])
94
+ else:
95
+ cx, cy = contour[0][0] # fall back to the first contour point when the centroid cannot be computed
96
+
97
+ # text background (draw a black box behind the label)
98
+ font = cv2.FONT_HERSHEY_SIMPLEX
99
+ text = obj_id
100
+ text_size = cv2.getTextSize(text, font, 1, 2)[0]
101
+ text_w, text_h = text_size
102
+
103
+ # draw the text background (black box)
104
+ cv2.rectangle(frame, (cx - text_w // 2 - 5, cy - text_h // 2 - 5),
105
+ (cx + text_w // 2 + 5, cy + text_h // 2 + 5), (0, 0, 0), -1)
106
+
107
+ # draw the label text (white)
108
+ cv2.putText(frame, text, (cx - text_w // 2, cy + text_h // 2),
109
+ font, 1, (255, 255, 255), 2)
110
+
111
+ else:
112
+ alpha = 0.08
113
+
114
+ colored_obj_mask = np.zeros_like(frame)
115
+ colored_obj_mask[obj_mask == 1] = colors[j]
116
+ frame[obj_mask == 1] = (
117
+ (1 - alpha) * frame[obj_mask == 1]
118
+ + alpha * colored_obj_mask[obj_mask == 1]
119
+ )
120
+
121
+
122
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
123
+ cv2.drawContours(frame, contours, -1, colors[j], 2)
124
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
125
+
126
+
127
+
128
+ if len(contours) > 0:
129
+ largest_contour = max(contours, key=cv2.contourArea)
130
+ M = cv2.moments(largest_contour)
131
+ if M["m00"] != 0:
132
+ center_x = int(M["m10"] / M["m00"])
133
+ center_y = int(M["m01"] / M["m00"])
134
+ else:
135
+ center_x, center_y = 0, 0
136
+
137
+ font = cv2.FONT_HERSHEY_SIMPLEX
138
+ text = obj_id
139
+
140
+ font_scale = 0.9
141
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
142
+ text_x = center_x - text_size[0] // 1 # horizontal anchor of the text
143
+ text_y = center_y
144
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
145
+
146
+ # compute the background rectangle for the text
147
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background rectangle
148
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
149
+ rect_end = (text_x + text_size[0] + 5, text_y)
150
+
151
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
152
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
153
+
154
+ # plt.figure(figsize=(12, 8))
155
+ # plt.imshow(frame)
156
+ # plt.title(f"frame {frame_name}")
157
+ # plt.tight_layout()
158
+ # plt.axis('off')
159
+ # plt.show()
160
+
161
+ buffer = BytesIO()
162
+ frame = Image.fromarray(frame)
163
+ frame.save(buffer, format='jpeg')
164
+ buffer.seek(0)
165
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
166
+ frame_cat_cnts[frame_name] = cat_cnt
167
+
168
+ buffer.seek(0) # Reuse buffer instead of creating a new one
169
+ buffer.truncate()
170
+ frame_for_contour = Image.fromarray(frame_for_contour)
171
+ frame_for_contour.save(buffer, format='jpeg')
172
+ buffer.seek(0)
173
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
174
+
175
+ encoded_frames[cat] = cat_frames
176
+ contoured_frames[cat] = contour_frames
177
+ vid_cat_cnts[cat] = frame_cat_cnts
178
+
179
+ return encoded_frames, vid_cat_cnts, contoured_frames
180
+
181
+
182
+ def getCaption(idx, model='gpt-4o', color_mask=True):
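+ # Same pipeline with a selectable OpenAI model (`model` argument): encode the numbered frames, check whether the labeled
+ # objects of each category show clearly distinct actions, and if so request one action-centric referring expression per
+ # object ID. Returns the video id, the captions per category and frame, and the valid object ids per category.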
183
+ vid_meta = metas[idx]
184
+ vid_data = train_dataset[idx]
185
+ vid_id = vid_meta['video']
186
+ print(f"vid id: {vid_id}\n")
187
+
188
+ frame_indx = vid_meta['sample_indx'] # e.g. [4, 7, 9, 16]
189
+ cat_names = set(vid_meta['obj_id_cat'].values()) # e.g. {"person", "elephant", ...}
190
+ all_captions = dict()
191
+
192
+ base64_frames, vid_cat_cnts, contoured_frames = number_objects_and_encode(idx, color_mask)
193
+ #marked = "mask with boundary" if color_mask else "boundary"
194
+
195
+ for cat_name in list(cat_names) :
196
+
197
+ is_movable = False
198
+ if cat_name in ytvos_category_valid_list :
199
+ is_movable = True
200
+
201
+ if not is_movable:
202
+ print(f"Skipping {cat_name}: Determined to be non-movable.", end='\n\n')
203
+
204
+
205
+ image_captions = {}
206
+ captioner = OpenAI()
207
+ cat_base64_frames = base64_frames[cat_name]
208
+ cont_base64_frames = contoured_frames[cat_name]
209
+
210
+ for i in range(len(cat_base64_frames)):
211
+ frame_name = frame_indx[i]
212
+ cont_base64_image = cont_base64_frames[i]
213
+ base64_image = cat_base64_frames[i]
214
+ should_filter = False
215
+ frame_cat_cnts = vid_cat_cnts[cat_name][frame_name]
216
+
217
+ if frame_cat_cnts >= 2:
218
+ should_filter = True
219
+ else:
220
+ print(f"Skipping {cat_name}: There is single or no object.", end='\n\n')
221
+
222
+ if is_movable and should_filter:
223
+ # Step 1: filtering
224
+ print(f"-----------category name: {cat_name}, frame name: {frame_name}")
225
+ caption_filter_text = f"""
226
+ You are a visual assistant analyzing a single frame from a video.
227
+ In this frame, I have labeled {frame_cat_cnts} {cat_name}(s), each with a bright numeric ID at its center and a visible marker.
228
+
229
+ Are {cat_name}s in the image performing all different and recognizable actions or postures?
230
+ Consider differences in body pose (standing, sitting, holding hands up, grabbing object, facing the camera, stretching, walking...), motion cues (inferred from the momentary stance or position),
231
+ facial expressions, and any notable interactions with objects or other {cat_name}s or people.
232
+
233
+ Only focus on obvious, prominent actions that can be reliably identified from this single frame.
234
+
235
+ - Respond with "YES" if:
236
+ 1) Most of {cat_name}s exhibit clearly different, unique actions or poses.
237
+ (e.g. standing, sitting, bending, stretching, showing its back, or turning toward the camera.)
238
+ 2) You can see visible significant differences in action and posture, that an observer can identify at a glance.
239
+ 3) Interaction Variability: Each {cat_name} is engaged in a different type of action, such as one grasping an object while another is observing.
240
+
241
+ - Respond with "NONE" if:
242
+ 1) The actions or pose are not clearly differentiable or too similar.
243
+ 2) Minimal or Ambiguous Motion: The frame does not provide clear evidence of distinct movement beyond subtle shifts in stance.
244
+ 3) Passive or Neutral Poses: If multiple {cat_name}(s) are simply standing or sitting without an obvious difference in orientation or motion
245
+
246
+ Answer strictly with either "YES" or "NONE".
247
+ """
248
+
249
+ response1 = captioner.chat.completions.create(
250
+ # model="chatgpt-4o-latest",
251
+ model=model,
252
+ messages=[
253
+ {
254
+ "role": "user",
255
+ "content": [
256
+ {
257
+ "type": "text",
258
+ "text": caption_filter_text,
259
+ },
260
+ {
261
+ "type": "image_url",
262
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
263
+ }
264
+ ],
265
+ }
266
+ ],
267
+ )
268
+ response_content = response1.choices[0].message.content
269
+ should_caption = True if "yes" in response_content.lower() else False
270
+ print(f"are {cat_name}s distinguished by action: {response_content}", end='\n\n')
271
+
272
+ else:
273
+ should_caption = False
274
+
275
+ # Step 2: generate dense captions
276
+ dense_caption_prompt_1 = f"""You are a visual assistant that can analyze a single frame of a video and create referring expressions for each object.
277
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
278
+ I want to use your expressions to create a action-centric referring expression dataset.
279
+ Therefore, your expressions for these {cat_name}s should describe unique action of each object.
280
+
281
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
282
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
283
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
284
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
285
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
286
+ 6. Avoid overly detailed or speculative descriptions such as 'slightly moving its mouth' or 'appears to be anticipating'.
287
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
288
+ 8. Include interactions with objects or other entities when they are prominent and observable.
289
+ 9. If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
290
+ 10. Do not include descriptions of appearance such as clothes, color, size, shape etc.
291
+ 11. Do not include relative position between objects such as 'the left elephant' because left/right can be ambiguous.
292
+ 12. Do not mention object IDs.
293
+ 13. Use '{cat_name}' as the noun for the referring expressions.
294
+
295
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
296
+ Output referring expressions for each object id.
297
+ """
298
+
299
+ dense_caption_prompt = f"""
300
+ You are a visual assistant analyzing a single frame of a video.
301
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
302
+
303
+ I want to use your expressions to create an **action-centric referring expression** dataset.
304
+ Please describe each {cat_name} using **clearly observable** and **specific** actions.
305
+
306
+ ---
307
+ ## Guidelines:
308
+ 1. **Focus on visible, prominent actions** only (e.g., running, pushing, grasping an object).
309
+ 2. **Avoid describing minor or ambiguous actions** (e.g., "slightly moving a paw", "slightly tilting head").
310
+ 3. **Do not include subjective or speculative descriptions** (e.g., “it seems excited” or “it might be preparing to jump”).
311
+ 4. **Avoid vague expressions** like "interacting with something" or "engaging with another object." Instead, specify the action (e.g., "grabbing a stick," "pressing a button").
312
+ 5. **Use dynamic action verbs** (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
313
+ 6. If multiple {cat_name}s appear, ensure each description **differentiates** their actions.
314
+ 7. Base your description on these action definitions:
315
+ - Avoid using term 'minimal' or 'slightly'.
316
+ - General body movement, body position, or pattern which is prominent. (e.g. "lifting head up", "facing towards", "showing its back")
317
+ - details such as motion and intention, facial with object manipulation
318
+ - movements with objects or other entities when they are prominent and observable. expression should be specific.
319
+ (e.g., "pushing another person" (O), "engaging with someone" (X) "interacting with another person" (X))
320
+ ---
321
+
322
+ ## Output Format:
323
+ - For each labeled {cat_name}, output **exactly one line**. Your answer should contain details and follow the following format :
324
+ object id. using {cat_name} as subject noun, action-oriented description
325
+ (e.g. 1. the person is holding ski poles and skiing on a snow mountain, with his two legs bent forward.)
326
+ - **Only include the currently labeled category** in each line (e.g., if it’s a person, do not suddenly label it as other object/animal).
327
+
328
+ ### Example
329
+ If the frame has 2 labeled bears, your output should look like:
330
+ 1. the bear reaching his right arm while leaning forward to capture the prey
331
+ 2. a bear standing upright facing right, touching the bike aside
332
+
333
+ ---
334
+ **Do not include** appearance details (e.g., color, size, texture) or relative positioning (e.g., “on the left/right”).
335
+ **Do not include object IDs** or reference them (e.g., "Person 1" or "object 2" is not allowed).
336
+ **Do not include markdown** in the output.
337
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
338
+ For each labeled {cat_name}, output referring expressions for each object id.
339
+ """
340
+ MAX_RETRIES = 2
341
+ retry_count = 0
342
+
343
+ if should_caption:
344
+ while retry_count < MAX_RETRIES:
345
+
346
+ response2 = captioner.chat.completions.create(
347
+ model=model,
348
+ messages=[
349
+ {
350
+ "role": "user",
351
+ "content": [
352
+ {
353
+ "type": "text",
354
+ "text": dense_caption_prompt,
355
+ },
356
+ {
357
+ "type": "image_url",
358
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
359
+ },
360
+ ],
361
+ }
362
+ ],
363
+ )
364
+
365
+ # caption = response2.choices[0].message.content
366
+ #print(f"{image_path} - {frame_name}: {caption}")
367
+
368
+ caption = response2.choices[0].message.content.strip()
369
+ caption_lower = caption.lower().lstrip()
370
+
371
+ if caption_lower.startswith("1.") and not any(
372
+ phrase in caption_lower for phrase in ["i'm sorry", "please", "can't help"]
373
+ ):
374
+ break
375
+
376
+ print(f"Retrying caption generation... ({retry_count + 1}/{MAX_RETRIES})")
377
+ retry_count += 1
378
+ time.sleep(2)
379
+
380
+ if retry_count == MAX_RETRIES:
381
+ caption = None
382
+ print("Max retries reached. Caption generation failed.")
383
+
384
+ else:
385
+ caption = None
386
+
387
+ image_captions[frame_name] = caption
388
+ all_captions[cat_name] = image_captions
389
+
390
+ # final : also prepare valid object ids
391
+ valid_obj_ids = dict()
392
+
393
+ for cat in cat_names:
394
+ if cat in ytvos_category_valid_list:
395
+ obj_id_cat = vid_meta['obj_id_cat']
396
+ valid_cat_ids = []
397
+ for obj_id in list(obj_id_cat.keys()):
398
+ if obj_id_cat[obj_id] == cat:
399
+ valid_cat_ids.append(obj_id)
400
+ valid_obj_ids[cat] = valid_cat_ids
401
+
402
+ return vid_id, all_captions, valid_obj_ids # include vid_id so the caller can key results per video
403
+
404
+
405
+
406
+ if __name__ == '__main__':
407
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
408
+ parser.add_argument('--save_caption_path', type=str, default="mbench/numbered_captions.json")
409
+ parser.add_argument('--save_valid_obj_ids_path', type=str, default="mbench/numbered_valid_obj_ids.json")
410
+
411
+ args = parser.parse_args()
412
+
413
+ #================== Load data ===================
+ # full dataset
+ train_dataset = build_ytvos_ref(image_set = 'train', args = args)
+
+ # metadata for the full dataset
+ metas = train_dataset.metas
+
+ # 8 candidate colors (RGB)
421
+ colors = [
422
+ (255, 0, 0), # Red
423
+ (0, 255, 0), # Green
424
+ (0, 0, 255), # Blue
425
+ (255, 255, 0), # Yellow
426
+ (255, 0, 255), # Magenta
427
+ (0, 255, 255), # Cyan
428
+ (128, 0, 128), # Purple
429
+ (255, 165, 0) # Orange
430
+ ]
431
+
432
+ ytvos_category_valid_list = [
433
+ 'airplane', 'ape', 'bear', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
434
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
435
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
436
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
437
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
438
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
439
+ ]
440
+
441
+ #================== Run GPT ===================
442
+ os.environ['OPENAI_API_KEY'] = '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'
443
+
444
+ result_captions = {}
445
+ result_valid_obj_ids = {}
446
+
447
+ for i in range(370):
448
+ vid_id, all_captions, valid_obj_ids = getCaption(i, color_mask=True) # pass color_mask by keyword so True is not bound to the model argument
449
+
450
+ if vid_id not in result_captions:
451
+ result_captions[vid_id] = all_captions
452
+ if vid_id not in result_valid_obj_ids:
453
+ result_valid_obj_ids[vid_id] = valid_obj_ids
454
+
455
+ print("Finished!", flush=True)
456
+
457
+ with open(args.save_caption_path, "w") as file:
458
+ json.dump(result_captions, file, indent=4)
459
+
460
+ with open(args.save_valid_obj_ids_path, "w") as file:
461
+ json.dump(result_valid_obj_ids, file, indent=4)
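The snapshot above hand-rolls the same frame-to-payload conversion before every chat.completions call: a numpy frame is written to an in-memory JPEG, base64-encoded, and spliced into an image_url data URL. A minimal sketch of that step, assuming the frame is already an HxWx3 uint8 RGB array (the helper name encode_frame_to_data_url is illustrative and not part of these files):

import base64
from io import BytesIO

import numpy as np
from PIL import Image

def encode_frame_to_data_url(frame: np.ndarray) -> str:
    # Serialize the RGB frame to an in-memory JPEG and wrap it in the data-URL
    # form used by the "image_url" content part of the chat completions request.
    buffer = BytesIO()
    Image.fromarray(frame).save(buffer, format="jpeg")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/jpeg;base64,{encoded}"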
.history/mbench/gpt_ref-ytvos_numbered_cy_20250201141240.py ADDED
@@ -0,0 +1,460 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+ import time
5
+
6
+ from os import path as osp
7
+ from io import BytesIO
8
+
9
+ from mbench.ytvos_ref import build as build_ytvos_ref
10
+ import argparse
11
+ import opts
12
+
13
+ import sys
14
+ from pathlib import Path
15
+ import os
16
+ from os import path as osp
17
+ import skimage
18
+ from io import BytesIO
19
+
20
+ import numpy as np
21
+ import pandas as pd
22
+ import regex as re
23
+ import json
24
+
25
+ import cv2
26
+ from PIL import Image, ImageDraw
27
+ import torch
28
+ from torchvision.transforms import functional as F
29
+
30
+ from skimage import measure # (pip install scikit-image)
31
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
32
+
33
+ import matplotlib.pyplot as plt
34
+ import matplotlib.patches as patches
35
+ from matplotlib.collections import PatchCollection
36
+ from matplotlib.patches import Rectangle
37
+ import textwrap
38
+
39
+
40
+ import ipywidgets as widgets
41
+ from IPython.display import display, clear_output
42
+
43
+ from openai import OpenAI
44
+ import base64
45
+ import json
46
+
47
+ def number_objects_and_encode(idx, color_mask=False):
48
+ encoded_frames = {}
49
+ contoured_frames = {} # New dictionary for original images
50
+ vid_cat_cnts = {}
51
+
52
+ vid_meta = metas[idx]
53
+ vid_data = train_dataset[idx]
54
+ vid_id = vid_meta['video']
55
+ frame_indx = vid_meta['sample_indx']
56
+ cat_names = set(vid_meta['obj_id_cat'].values())
57
+ imgs = vid_data[0]
58
+
59
+ for cat in cat_names:
60
+ cat_frames = []
61
+ contour_frames = []
62
+ frame_cat_cnts = {}
63
+
64
+ for i in range(imgs.size(0)):
65
+ frame_name = frame_indx[i]
66
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
67
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
68
+
69
+ frame_data = vid_data[2][frame_name]
70
+ obj_ids = list(frame_data.keys())
71
+
72
+ cat_cnt = 0
73
+
74
+ for j in range(len(obj_ids)):
75
+ obj_id = obj_ids[j]
76
+ obj_data = frame_data[obj_id]
77
+ obj_bbox = obj_data['bbox']
78
+ obj_valid = obj_data['valid']
79
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
80
+ obj_cat = obj_data['category_name']
81
+
82
+ if obj_cat == cat and obj_valid:
83
+ cat_cnt += 1
84
+
85
+ if color_mask == False:
86
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
87
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
88
+ for i, contour in enumerate(contours):
+ # compute the contour's center
+ moments = cv2.moments(contour)
+ if moments["m00"] != 0: # check that a centroid can be computed
+ cx = int(moments["m10"] / moments["m00"])
+ cy = int(moments["m01"] / moments["m00"])
+ else:
+ cx, cy = contour[0][0] # fall back to the first contour point
+
+ # text background (prepare a black box)
+ font = cv2.FONT_HERSHEY_SIMPLEX
+ text = obj_id
+ text_size = cv2.getTextSize(text, font, 1, 2)[0]
+ text_w, text_h = text_size
+
+ # draw the text background (black box)
+ cv2.rectangle(frame, (cx - text_w // 2 - 5, cy - text_h // 2 - 5),
+ (cx + text_w // 2 + 5, cy + text_h // 2 + 5), (0, 0, 0), -1)
+
+ # draw the text (white)
+ cv2.putText(frame, text, (cx - text_w // 2, cy + text_h // 2),
+ font, 1, (255, 255, 255), 2)
110
+
111
+ else:
112
+ alpha = 0.08
113
+
114
+ colored_obj_mask = np.zeros_like(frame)
115
+ colored_obj_mask[obj_mask == 1] = colors[j]
116
+ frame[obj_mask == 1] = (
117
+ (1 - alpha) * frame[obj_mask == 1]
118
+ + alpha * colored_obj_mask[obj_mask == 1]
119
+ )
120
+
121
+
122
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
123
+ cv2.drawContours(frame, contours, -1, colors[j], 2)
124
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
125
+
126
+
127
+
128
+ if len(contours) > 0:
129
+ largest_contour = max(contours, key=cv2.contourArea)
130
+ M = cv2.moments(largest_contour)
131
+ if M["m00"] != 0:
132
+ center_x = int(M["m10"] / M["m00"])
133
+ center_y = int(M["m01"] / M["m00"])
134
+ else:
135
+ center_x, center_y = 0, 0
136
+
137
+ font = cv2.FONT_HERSHEY_SIMPLEX
138
+ text = obj_id
139
+
140
+ font_scale = 0.9
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
+ text_x = center_x - text_size[0] // 1 # horizontal anchor of the text
+ text_y = center_y
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
+
+ # compute the text background rectangle
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background box
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
+ rect_end = (text_x + text_size[0] + 5, text_y)
+
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
153
+
154
+ # plt.figure(figsize=(12, 8))
155
+ # plt.imshow(frame)
156
+ # plt.title(f"frame {frame_name}")
157
+ # plt.tight_layout()
158
+ # plt.axis('off')
159
+ # plt.show()
160
+
161
+ buffer = BytesIO()
162
+ frame = Image.fromarray(frame)
163
+ frame.save(buffer, format='jpeg')
164
+ buffer.seek(0)
165
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
166
+ frame_cat_cnts[frame_name] = cat_cnt
167
+
168
+ buffer.seek(0) # Reuse buffer instead of creating a new one
169
+ buffer.truncate()
170
+ frame_for_contour = Image.fromarray(frame_for_contour)
171
+ frame_for_contour.save(buffer, format='jpeg')
172
+ buffer.seek(0)
173
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
174
+
175
+ encoded_frames[cat] = cat_frames
176
+ contoured_frames[cat] = contour_frames
177
+ vid_cat_cnts[cat] = frame_cat_cnts
178
+
179
+ return encoded_frames, vid_cat_cnts, contoured_frames
180
+
181
+
182
+ def getCaption(idx, model='gpt-4o', color_mask=True):
183
+ vid_meta = metas[idx]
184
+ vid_data = train_dataset[idx]
185
+ vid_id = vid_meta['video']
186
+ print(f"vid id: {vid_id}\n")
187
+
188
+ frame_indx = vid_meta['sample_indx'] # e.g. [4, 7, 9, 16]
189
+ cat_names = set(vid_meta['obj_id_cat'].values()) # e.g. {"person", "elephant", ...}
190
+ all_captions = dict()
191
+
192
+ base64_frames, vid_cat_cnts, contoured_frames = number_objects_and_encode(idx, color_mask)
193
+ #marked = "mask with boundary" if color_mask else "boundary"
194
+
195
+ for cat_name in list(cat_names) :
196
+
197
+ is_movable = False
198
+ if cat_name in ytvos_category_valid_list :
199
+ is_movable = True
200
+
201
+ if not is_movable:
202
+ print(f"Skipping {cat_name}: Determined to be non-movable.", end='\n\n')
203
+
204
+
205
+ image_captions = {}
206
+ captioner = OpenAI()
207
+ cat_base64_frames = base64_frames[cat_name]
208
+ cont_base64_frames = contoured_frames[cat_name]
209
+
210
+ for i in range(len(cat_base64_frames)):
211
+ frame_name = frame_indx[i]
212
+ cont_base64_image = cont_base64_frames[i]
213
+ base64_image = cat_base64_frames[i]
214
+ should_filter = False
215
+ frame_cat_cnts = vid_cat_cnts[cat_name][frame_name]
216
+
217
+ if frame_cat_cnts >= 2:
218
+ should_filter = True
219
+ else:
220
+ print(f"Skipping {cat_name}: There is single or no object.", end='\n\n')
221
+
222
+ if is_movable and should_filter:
223
+ # Step 1: filtering
224
+ print(f"-----------category name: {cat_name}, frame name: {frame_name}")
225
+ caption_filter_text = f"""
226
+ You are a visual assistant analyzing a single frame from a video.
227
+ In this frame, I have labeled {frame_cat_cnts} {cat_name}(s), each with a bright numeric ID at its center and a visible marker.
228
+
229
+ Are {cat_name}s in the image performing all different and recognizable actions or postures?
230
+ Consider differences in body pose (standing, sitting, holding hands up, grabbing object, facing the camera, stretching, walking...), motion cues (inferred from the momentary stance or position),
231
+ facial expressions, and any notable interactions with objects or other {cat_name}s or people.
232
+
233
+ Only focus on obvious, prominent actions that can be reliably identified from this single frame.
234
+
235
+ - Respond with "YES" if:
236
+ 1) Most of {cat_name}s exhibit clearly different, unique actions or poses.
237
+ (e.g. standing, sitting, bending, stretching, showing its back, or turning toward the camera.)
238
+ 2) You can see visible significant differences in action and posture, that an observer can identify at a glance.
239
+ 3) Interaction Variability: Each {cat_name} is engaged in a different type of action, such as one grasping an object while another is observing.
240
+
241
+ - Respond with "NONE" if:
242
+ 1) The actions or pose are not clearly differentiable or too similar.
243
+ 2) Minimal or Ambiguous Motion: The frame does not provide clear evidence of distinct movement beyond subtle shifts in stance.
244
+ 3) Passive or Neutral Poses: If multiple {cat_name}(s) are simply standing or sitting without an obvious difference in orientation or motion
245
+
246
+ Answer strictly with either "YES" or "NONE".
247
+ """
248
+
249
+ response1 = captioner.chat.completions.create(
250
+ model=model,
251
+ messages=[
252
+ {
253
+ "role": "user",
254
+ "content": [
255
+ {
256
+ "type": "text",
257
+ "text": caption_filter_text,
258
+ },
259
+ {
260
+ "type": "image_url",
261
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
262
+ }
263
+ ],
264
+ }
265
+ ],
266
+ )
267
+ response_content = response1.choices[0].message.content
268
+ should_caption = True if "yes" in response_content.lower() else False
269
+ print(f"are {cat_name}s distinguished by action: {response_content}", end='\n\n')
270
+
271
+ else:
272
+ should_caption = False
273
+
274
+ # Step 2: generate dense captions
275
+ dense_caption_prompt_1 = f"""You are a visual assistant that can analyze a single frame of a video and create referring expressions for each object.
276
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
277
+ I want to use your expressions to create a action-centric referring expression dataset.
278
+ Therefore, your expressions for these {cat_name}s should describe unique action of each object.
279
+
280
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
281
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
282
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
283
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
284
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
285
+ 6. Avoid overly detailed or speculative descriptions such as 'slightly moving its mouth' or 'appears to be anticipating'.
286
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
287
+ 8. Include interactions with objects or other entities when they are prominent and observable.
288
+ 9. If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
289
+ 10. Do not include descriptions of appearance such as clothes, color, size, shape etc.
290
+ 11. Do not include relative position between objects such as 'the left elephant' because left/right can be ambiguous.
291
+ 12. Do not mention object IDs.
292
+ 13. Use '{cat_name}' as the noun for the referring expressions.
293
+
294
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
295
+ Output referring expressions for each object id.
296
+ """
297
+
298
+ dense_caption_prompt = f"""
299
+ You are a visual assistant analyzing a single frame of a video.
300
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary.
301
+
302
+ I want to use your expressions to create an **action-centric referring expression** dataset.
303
+ Please describe each {cat_name} using **clearly observable** and **specific** actions.
304
+
305
+ ---
306
+ ## Guidelines:
307
+ 1. **Focus on visible, prominent actions** only (e.g., running, pushing, grasping an object).
308
+ 2. **Avoid describing minor or ambiguous actions** (e.g., "slightly moving a paw", "slightly tilting head").
309
+ 3. **Do not include subjective or speculative descriptions** (e.g., “it seems excited” or “it might be preparing to jump”).
310
+ 4. **Avoid vague expressions** like "interacting with something" or "engaging with another object." Instead, specify the action (e.g., "grabbing a stick," "pressing a button").
311
+ 5. **Use dynamic action verbs** (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
312
+ 6. If multiple {cat_name}s appear, ensure each description **differentiates** their actions.
313
+ 7. Base your description on these action definitions:
314
+ - Avoid using term 'minimal' or 'slightly'.
315
+ - General body movement, body position, or pattern which is prominent. (e.g. "lifting head up", "facing towards", "showing its back")
316
+ - details such as motion and intention, facial with object manipulation
317
+ - movements with objects or other entities when they are prominent and observable. expression should be specific.
318
+ (e.g., "pushing another person" (O), "engaging with someone" (X) "interacting with another person" (X))
319
+ ---
320
+
321
+ ## Output Format:
322
+ - For each labeled {cat_name}, output **exactly one line**. Your answer should contain details and follow the following format :
323
+ object id. using {cat_name} as subject noun, action-oriented description
324
+ (e.g. 1. the person is holding ski poles and skiing on a snow mountain, with his two legs bent forward.)
325
+ - **Only include the currently labeled category** in each line (e.g., if it’s a person, do not suddenly label it as other object/animal).
326
+
327
+ ### Example
328
+ If the frame has 2 labeled bears, your output should look like:
329
+ 1. the bear reaching his right arm while leaning forward to capture the prey
330
+ 2. a bear standing upright facing right, touching the bike aside
331
+
332
+ ---
333
+ **Do not include** appearance details (e.g., color, size, texture) or relative positioning (e.g., “on the left/right”).
334
+ **Do not include object IDs** or reference them (e.g., "Person 1" or "object 2" is not allowed).
335
+ **Do not include markdown** in the output.
336
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
337
+ For each labeled {cat_name}, output referring expressions for each object id.
338
+ """
339
+ MAX_RETRIES = 2
340
+ retry_count = 0
341
+
342
+ if should_caption:
343
+ while retry_count < MAX_RETRIES:
344
+
345
+ response2 = captioner.chat.completions.create(
346
+ model=model,
347
+ messages=[
348
+ {
349
+ "role": "user",
350
+ "content": [
351
+ {
352
+ "type": "text",
353
+ "text": dense_caption_prompt,
354
+ },
355
+ {
356
+ "type": "image_url",
357
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
358
+ },
359
+ ],
360
+ }
361
+ ],
362
+ )
363
+
364
+ # caption = response2.choices[0].message.content
365
+ #print(f"{image_path} - {frame_name}: {caption}")
366
+
367
+ caption = response2.choices[0].message.content.strip()
368
+ caption_lower = caption.lower().lstrip()
369
+
370
+ if caption_lower.startswith("1.") and not any(
371
+ phrase in caption_lower for phrase in ["i'm sorry", "please", "can't help"]
372
+ ):
373
+ break
374
+
375
+ print(f"Retrying caption generation... ({retry_count + 1}/{MAX_RETRIES})")
376
+ retry_count += 1
377
+ time.sleep(2)
378
+
379
+ if retry_count == MAX_RETRIES:
380
+ caption = None
381
+ print("Max retries reached. Caption generation failed.")
382
+
383
+ else:
384
+ caption = None
385
+
386
+ image_captions[frame_name] = caption
387
+ all_captions[cat_name] = image_captions
388
+
389
+ # final : also prepare valid object ids
390
+ valid_obj_ids = dict()
391
+
392
+ for cat in cat_names:
393
+ if cat in ytvos_category_valid_list:
394
+ obj_id_cat = vid_meta['obj_id_cat']
395
+ valid_cat_ids = []
396
+ for obj_id in list(obj_id_cat.keys()):
397
+ if obj_id_cat[obj_id] == cat:
398
+ valid_cat_ids.append(obj_id)
399
+ valid_obj_ids[cat] = valid_cat_ids
400
+
401
+ return vid_id, all_captions, valid_obj_ids # include vid_id so the caller can key results per video
402
+
403
+
404
+
405
+ if __name__ == '__main__':
406
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
407
+ parser.add_argument('--save_caption_path', type=str, default="mbench/numbered_captions.json")
408
+ parser.add_argument('--save_valid_obj_ids_path', type=str, default="mbench/numbered_valid_obj_ids.json")
409
+
410
+ args = parser.parse_args()
411
+
412
+ #================== Load data ===================
+ # full dataset
+ train_dataset = build_ytvos_ref(image_set = 'train', args = args)
+
+ # metadata for the full dataset
+ metas = train_dataset.metas
+
+ # 8 candidate colors (RGB)
420
+ colors = [
421
+ (255, 0, 0), # Red
422
+ (0, 255, 0), # Green
423
+ (0, 0, 255), # Blue
424
+ (255, 255, 0), # Yellow
425
+ (255, 0, 255), # Magenta
426
+ (0, 255, 255), # Cyan
427
+ (128, 0, 128), # Purple
428
+ (255, 165, 0) # Orange
429
+ ]
430
+
431
+ ytvos_category_valid_list = [
432
+ 'airplane', 'ape', 'bear', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
433
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
434
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
435
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
436
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
437
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
438
+ ]
439
+
440
+ #================== Run GPT ===================
441
+ os.environ['OPENAI_API_KEY'] = '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'
442
+
443
+ result_captions = {}
444
+ result_valid_obj_ids = {}
445
+
446
+ for i in range(370):
447
+ vid_id, all_captions, valid_obj_ids = getCaption(i, color_mask=True) # pass color_mask by keyword so True is not bound to the model argument
448
+
449
+ if vid_id not in result_captions:
450
+ result_captions[vid_id] = all_captions
451
+ if vid_id not in result_valid_obj_ids:
452
+ result_valid_obj_ids[vid_id] = valid_obj_ids
453
+
454
+ print("Finished!", flush=True)
455
+
456
+ with open(args.save_caption_path, "w") as file:
457
+ json.dump(result_captions, file, indent=4)
458
+
459
+ with open(args.save_valid_obj_ids_path, "w") as file:
460
+ json.dump(result_valid_obj_ids, file, indent=4)
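Both numbered-caption snapshots place each numeric ID at the centroid of the object's largest mask contour, computed from image moments, with a filled black box behind white text. A condensed sketch of that labeling step, assuming a binary HxW mask and a contiguous uint8 RGB frame (the name label_mask_center and its defaults are illustrative, not taken from these files):

import cv2
import numpy as np

def label_mask_center(frame: np.ndarray, mask: np.ndarray, text: str,
                      color=(255, 0, 0)) -> np.ndarray:
    # Outline the mask, then draw `text` on a black box anchored at the centroid
    # of the largest contour (fall back to (0, 0) if the moment is degenerate).
    contours, _ = cv2.findContours(mask.astype(np.uint8),
                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return frame
    cv2.drawContours(frame, contours, -1, color, 3)
    m = cv2.moments(max(contours, key=cv2.contourArea))
    cx = int(m["m10"] / m["m00"]) if m["m00"] else 0
    cy = int(m["m01"] / m["m00"]) if m["m00"] else 0
    font, scale, thickness = cv2.FONT_HERSHEY_SIMPLEX, 0.9, 2
    (tw, th), _ = cv2.getTextSize(text, font, scale, thickness)
    cv2.rectangle(frame, (cx - 5, cy - th - 5), (cx + tw + 5, cy + 5), (0, 0, 0), -1)
    cv2.putText(frame, text, (cx, cy), font, scale, (255, 255, 255), thickness)
    return frame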
.history/mbench/gpt_ref-ytvos_numbered_cy_sanity_2_20250207172754.py ADDED
@@ -0,0 +1,656 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+ import time
5
+
6
+ from os import path as osp
7
+ from io import BytesIO
8
+ import random
9
+
10
+ from mbench.ytvos_ref import build as build_ytvos_ref
11
+ import argparse
12
+ import opts
13
+
14
+ import sys
15
+ from pathlib import Path
16
+ import os
17
+ from os import path as osp
18
+ import skimage
19
+ from io import BytesIO
20
+
21
+ import numpy as np
22
+ import pandas as pd
23
+ import regex as re
24
+ import json
25
+
26
+ import cv2
27
+ from PIL import Image, ImageDraw
28
+ import torch
29
+ from torchvision.transforms import functional as F
30
+
31
+ from skimage import measure # (pip install scikit-image)
32
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
33
+
34
+ import matplotlib.pyplot as plt
35
+ import matplotlib.patches as patches
36
+ from matplotlib.collections import PatchCollection
37
+ from matplotlib.patches import Rectangle
38
+ import textwrap
39
+
40
+
41
+ import ipywidgets as widgets
42
+ from IPython.display import display, clear_output
43
+
44
+ from openai import OpenAI
45
+ import base64
46
+ import json
47
+ import requests
48
+ from openai import APIConnectionError, OpenAIError # openai>=1.0 exposes these at the package root; openai.error no longer exists
49
+
50
+ def number_objects_and_encode_old(idx, color_mask=False):
51
+ encoded_frames = {}
52
+ contoured_frames = {} # New dictionary for original images
53
+ vid_cat_cnts = {}
54
+
55
+ vid_meta = metas[idx]
56
+ vid_data = train_dataset[idx]
57
+ vid_id = vid_meta['video']
58
+ frame_indx = vid_meta['sample_indx']
59
+ cat_names = set(vid_meta['obj_id_cat'].values())
60
+ imgs = vid_data[0]
61
+
62
+ for cat in cat_names:
63
+ cat_frames = []
64
+ contour_frames = []
65
+ frame_cat_cnts = {}
66
+
67
+ for i in range(imgs.size(0)):
68
+ frame_name = frame_indx[i]
69
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
70
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
71
+
72
+ frame_data = vid_data[2][frame_name]
73
+ obj_ids = list(frame_data.keys())
74
+
75
+ cat_cnt = 0
76
+
77
+ for j in range(len(obj_ids)):
78
+ obj_id = obj_ids[j]
79
+ obj_data = frame_data[obj_id]
80
+ obj_bbox = obj_data['bbox']
81
+ obj_valid = obj_data['valid']
82
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
83
+ obj_cat = obj_data['category_name']
84
+
85
+ if obj_cat == cat and obj_valid:
86
+ cat_cnt += 1
87
+
88
+ if color_mask == False:
89
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
90
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
91
+ for i, contour in enumerate(contours):
92
+ moments = cv2.moments(contour)
93
+ if moments["m00"] != 0:
94
+ cx = int(moments["m10"] / moments["m00"])
95
+ cy = int(moments["m01"] / moments["m00"])
96
+ else:
97
+ cx, cy = contour[0][0]
98
+
99
+ font = cv2.FONT_HERSHEY_SIMPLEX
100
+ text = obj_id
101
+ text_size = cv2.getTextSize(text, font, 1, 2)[0]
102
+ text_w, text_h = text_size
103
+
104
+ cv2.rectangle(frame, (cx - text_w // 2 - 5, cy - text_h // 2 - 5),
105
+ (cx + text_w // 2 + 5, cy + text_h // 2 + 5), (0, 0, 0), -1)
106
+
107
+ cv2.putText(frame, text, (cx - text_w // 2, cy + text_h // 2),
108
+ font, 1, (255, 255, 255), 2)
109
+
110
+ else:
111
+ alpha = 0.08
112
+
113
+ colored_obj_mask = np.zeros_like(frame)
114
+ colored_obj_mask[obj_mask == 1] = colors[j]
115
+ frame[obj_mask == 1] = (
116
+ (1 - alpha) * frame[obj_mask == 1]
117
+ + alpha * colored_obj_mask[obj_mask == 1]
118
+ )
119
+
120
+
121
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
122
+ cv2.drawContours(frame, contours, -1, colors[j], 2)
123
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
124
+
125
+ if len(contours) > 0:
126
+ largest_contour = max(contours, key=cv2.contourArea)
127
+ M = cv2.moments(largest_contour)
128
+ if M["m00"] != 0:
129
+ center_x = int(M["m10"] / M["m00"])
130
+ center_y = int(M["m01"] / M["m00"])
131
+ else:
132
+ center_x, center_y = 0, 0
133
+
134
+ font = cv2.FONT_HERSHEY_SIMPLEX
135
+ text = obj_id
136
+
137
+ font_scale = 0.9
138
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
139
+ text_x = center_x - text_size[0] // 1
140
+ text_y = center_y
141
+
142
+ rect_start = (text_x - 5, text_y - text_size[1] - 5)
143
+ rect_end = (text_x + text_size[0] + 5, text_y)
144
+
145
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
146
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
147
+
148
+ # plt.figure(figsize=(12, 8))
149
+ # plt.imshow(frame)
150
+ # plt.title(f"frame {frame_name}")
151
+ # plt.tight_layout()
152
+ # plt.axis('off')
153
+ # plt.show()
154
+
155
+ buffer = BytesIO()
156
+ frame = Image.fromarray(frame)
157
+ frame.save(buffer, format='jpeg')
158
+ buffer.seek(0)
159
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
160
+ frame_cat_cnts[frame_name] = cat_cnt
161
+
162
+ buffer.seek(0) # Reuse buffer instead of creating a new one
163
+ buffer.truncate()
164
+ frame_for_contour = Image.fromarray(frame_for_contour)
165
+ frame_for_contour.save(buffer, format='jpeg')
166
+ buffer.seek(0)
167
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
168
+
169
+ encoded_frames[cat] = cat_frames
170
+ contoured_frames[cat] = contour_frames
171
+ vid_cat_cnts[cat] = frame_cat_cnts
172
+
173
+ return encoded_frames, contoured_frames, vid_cat_cnts
174
+
175
+
176
+ def number_objects_and_encode(idx, color_mask=False):
177
+ encoded_frames = {}
178
+ contoured_frames = {} # New dictionary for original images
179
+ vid_cat_cnts = {}
180
+
181
+ vid_meta = metas[idx]
182
+ vid_data = train_dataset[idx]
183
+ vid_id = vid_meta['video']
184
+ frame_indx = vid_meta['sample_indx']
185
+ cat_names = set(vid_meta['obj_id_cat'].values())
186
+ imgs = vid_data[0]
187
+
188
+ for cat in cat_names:
189
+ cat_frames = []
190
+ contour_frames = []
191
+ frame_cat_cnts = {}
192
+
193
+ for i in range(imgs.size(0)):
194
+ frame_name = frame_indx[i]
195
+ frame = np.copy(imgs[i].permute(1, 2, 0).numpy())
196
+ frame_for_contour = np.copy(imgs[i].permute(1, 2, 0).numpy())
197
+
198
+ frame_data = vid_data[2][frame_name]
199
+ obj_ids = list(frame_data.keys())
200
+
201
+ cat_cnt = 0
202
+
203
+ for j in range(len(obj_ids)):
204
+ obj_id = obj_ids[j]
205
+ obj_data = frame_data[obj_id]
206
+ obj_bbox = obj_data['bbox']
207
+ obj_valid = obj_data['valid']
208
+ obj_mask = obj_data['mask'].numpy().astype(np.uint8)
209
+ obj_cat = obj_data['category_name']
210
+
211
+ if obj_cat == cat and obj_valid:
212
+ cat_cnt += 1
213
+
214
+ contours, _ = cv2.findContours(obj_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
215
+ cv2.drawContours(frame, contours, -1, colors[j], 3)
216
+ cv2.drawContours(frame_for_contour, contours, -1, colors[j], 2)
217
+
218
+ if len(contours) > 0:
219
+ largest_contour = max(contours, key=cv2.contourArea)
220
+ M = cv2.moments(largest_contour)
221
+ if M["m00"] != 0:
222
+ center_x = int(M["m10"] / M["m00"])
223
+ center_y = int(M["m01"] / M["m00"])
224
+ else:
225
+ center_x, center_y = 0, 0
226
+
227
+ font = cv2.FONT_HERSHEY_SIMPLEX
228
+ text = obj_id
229
+ font_scale = 1.2
230
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
231
+ text_x = center_x - text_size[0] // 1
232
+ text_y = center_y
233
+
234
+ rect_start = (text_x - 5, text_y - text_size[1] - 5)
235
+ rect_end = (text_x + text_size[0] + 5, text_y + 3)
236
+
237
+ contour_thickness = 1
238
+ rect_start_contour = (rect_start[0] - contour_thickness, rect_start[1] - contour_thickness)
239
+ rect_end_contour = (rect_end[0] + contour_thickness, rect_end[1] + contour_thickness)
240
+
241
+ cv2.rectangle(frame, rect_start_contour, rect_end_contour, colors[j], contour_thickness)
242
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
243
+ cv2.putText(frame, text, (text_x, text_y), font, 1, (255, 255, 255), 2)
244
+
245
+
246
+ if color_mask:
247
+ alpha = 0.08
248
+ colored_obj_mask = np.zeros_like(frame)
249
+ colored_obj_mask[obj_mask == 1] = colors[j]
250
+ frame[obj_mask == 1] = (
251
+ (1 - alpha) * frame[obj_mask == 1]
252
+ + alpha * colored_obj_mask[obj_mask == 1]
253
+ )
254
+
255
+ # plt.figure(figsize=(12, 8))
256
+ # plt.imshow(frame)
257
+ # plt.title(f"frame {frame_name}")
258
+ # plt.tight_layout()
259
+ # plt.axis('off')
260
+ # plt.show()
261
+
262
+ buffer = BytesIO()
263
+ frame = Image.fromarray(frame)
264
+ frame.save(buffer, format='jpeg')
265
+ buffer.seek(0)
266
+ cat_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
267
+ frame_cat_cnts[frame_name] = cat_cnt
268
+
269
+ buffer.seek(0) # Reuse buffer instead of creating a new one
270
+ buffer.truncate()
271
+ frame_for_contour = Image.fromarray(frame_for_contour)
272
+ frame_for_contour.save(buffer, format='jpeg')
273
+ buffer.seek(0)
274
+ contour_frames.append(base64.b64encode(buffer.read()).decode("utf-8"))
275
+
276
+ encoded_frames[cat] = cat_frames
277
+ contoured_frames[cat] = contour_frames
278
+ vid_cat_cnts[cat] = frame_cat_cnts
279
+
280
+ return encoded_frames, contoured_frames, vid_cat_cnts
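# Illustrative usage sketch (not part of the original snapshot): a quick sanity
# check decodes one returned frame back into an image, e.g.
#   encoded, contoured, counts = number_objects_and_encode(0, color_mask=False)
#   cat = next(iter(encoded))
#   Image.open(BytesIO(base64.b64decode(encoded[cat][0]))).save("sanity_frame.jpg")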
281
+
282
+
283
+
284
+ def getCaption(idx, model='gpt-4o'):
285
+ vid_meta = metas[idx]
286
+ vid_data = train_dataset[idx]
287
+ vid_id = vid_meta['video']
288
+ print(f"vid id: {vid_id}\n")
289
+
290
+ frame_indx = vid_meta['sample_indx'] # e.g. [4, 7, 9, 16]
291
+ cat_names = set(vid_meta['obj_id_cat'].values()) # e.g. {"person", "elephant", ...}
292
+ all_captions = dict()
293
+
294
+ # color_mask = random.choice([True, False])
295
+ color_mask = random.choices([False, True], weights=[60, 40])[0]
296
+
297
+ base64_frames, _ , vid_cat_cnts = number_objects_and_encode(idx, color_mask)
298
+ #marked = "mask with boundary" if color_mask else "boundary"
299
+
300
+ for cat_name in list(cat_names) :
301
+
302
+ is_movable = False
303
+ if cat_name in ytvos_category_valid_list :
304
+ is_movable = True
305
+
306
+ if not is_movable:
307
+ print(f"Skipping {cat_name}: Determined to be non-movable.", end='\n\n')
308
+
309
+
310
+ image_captions = {}
311
+ captioner = OpenAI()
312
+ cat_base64_frames = base64_frames[cat_name]
313
+ # cont_base64_frames = contoured_frames[cat_name]
314
+
315
+ for i in range(len(cat_base64_frames)):
316
+ frame_name = frame_indx[i]
317
+ # cont_base64_image = cont_base64_frames[i]
318
+ base64_image = cat_base64_frames[i]
319
+ should_filter = False
320
+ frame_cat_cnts = vid_cat_cnts[cat_name][frame_name]
321
+
322
+ if frame_cat_cnts >= 2:
323
+ should_filter = True
324
+ else:
325
+ print(f"Skipping {cat_name}: There is single or no object.", end='\n\n')
326
+
327
+
328
+ if is_movable and should_filter:
329
+ #1단계: 필터링
330
+ print(f"-----------category name: {cat_name}, frame name: {frame_name}")
331
+ caption_filter_text = f"""
332
+ You are a visual assistant analyzing a single frame from a video.
333
+ In this frame, I have labeled {frame_cat_cnts} {cat_name}(s), each with a bright numeric ID at its center and a visible marker.
334
+
335
+ Are {cat_name}s in the image performing all different and recognizable actions or postures?
336
+ Consider differences in body pose (standing, sitting, holding hands up, grabbing object, facing the camera, stretching, walking...), motion cues (inferred from the momentary stance or position),
337
+ facial expressions, and any notable interactions with objects or other {cat_name}s or people.
338
+
339
+ Only focus on obvious, prominent actions that can be reliably identified from this single frame.
340
+
341
+ - Respond with "YES" if:
342
+ 1) Most of {cat_name}s exhibit clearly different, unique actions or poses.
343
+ (e.g. standing, sitting, bending, stretching, showing its back, or turning toward the camera.)
344
+ 2) You can see visible significant differences in action and posture, that an observer can identify at a glance.
345
+ 3) Interaction Variability: Each {cat_name} is engaged in a different type of action, such as one grasping an object while another is observing.
346
+
347
+ - Respond with "NONE" if:
348
+ 1) The actions or pose are not clearly differentiable or too similar.
349
+ 2) Minimal or Ambiguous Motion: The frame does not provide clear evidence of distinct movement beyond subtle shifts in stance.
350
+ 3) Passive or Neutral Poses: If multiple {cat_name}(s) are simply standing or sitting without an obvious difference in orientation or motion
351
+
352
+ Answer strictly with either "YES" or "NONE".
353
+ """
354
+
355
+ response1 = captioner.chat.completions.create(
356
+ model=model,
357
+ messages=[
358
+ {
359
+ "role": "user",
360
+ "content": [
361
+ {
362
+ "type": "text",
363
+ "text": caption_filter_text,
364
+ },
365
+ {
366
+ "type": "image_url",
367
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
368
+ }
369
+ ],
370
+ }
371
+ ],
372
+ )
373
+ response_content = response1.choices[0].message.content
374
+ should_caption = True if "yes" in response_content.lower() else False
375
+ print(f"are {cat_name}s distinguished by action: {response_content}", end='\n\n')
376
+
377
+ else:
378
+ should_caption = False
379
+
380
+ #2단계: dense caption 만들기
381
+ dense_caption_prompt_1 = f"""
382
+ In the given frame, I labeled {frame_cat_cnts} {cat_name}s by marking each with a bright numeric ID at the center and its boundary. The category name of these objects are : {cat_name}.
383
+
384
+ Please describe the image focusing on labeled {cat_name}s in detail, focusing on their actions and interactions.
385
+
386
+ 1. Focus only on clear, unique, and prominent actions that distinguish each object.
387
+ 2. Avoid describing actions that are too minor, ambiguous, or not visible from the image.
388
+ 3. Avoid subjective terms such as 'skilled', 'controlled', or 'focused'. Only describe observable actions.
389
+ 4. Do not include common-sense or overly general descriptions like 'the elephant walks'.
390
+ 5. Use dynamic action verbs (e.g., holding, throwing, jumping, inspecting) to describe interactions, poses, or movements.
391
+ 6. **Avoid overly detailed or speculative descriptions** such as 'slightly moving its mouth' or 'appears to be anticipating'.
392
+ - expressions like 'seems to be', 'appears to be' are BANNED!
393
+ 7. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
394
+ 8. Include interactions with objects or other entities when they are prominent and observable.
395
+ 9. **Do not include descriptions of appearance** such as clothes, color, size, shape etc.
396
+ 10. **Do not include relative position** between objects such as 'the left elephant' because left/right can be ambiguous.
397
+ 11. Do not mention object IDs.
398
+ 12. Use '{cat_name}' as the noun for the referring expressions.
399
+
400
+ Note that I want to use your description to create a grounding dataset, therefore, your descriptions for different objects should be unique, i.e., If the image contains multiple {cat_name}s, describe the actions of each individually and ensure the descriptions are non-overlapping and specific.
401
+
402
+ - Your answer should contain details, and follow the following format:
403
+ object id. action-oriented description
404
+ (e.g. 1. the person is holding bananas on two hands and opening his mouth, turning the head right.
405
+ 2. a person bending over and touching his boots to tie the shoelace.)
406
+ - for action-oriented description, use {cat_name} as subject noun
407
+
408
+ **Only include the currently labeled category** in each line (e.g., if it’s a person, do not suddenly label it as other object/animal).
409
+ Please pay attention to the categories of these objects and don’t change them.
410
+ Keep in mind that you should not group the objects, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
411
+ Output referring expressions for each object id. Please start your answer:"""
412
+
413
+
414
+ dense_caption_prompt_2 = f"""
415
+ You are an advanced visual language model analyzing a video frame.
416
+ In this frame, {frame_cat_cnts} objects belonging to the category **{cat_name}** have been distinctly labeled with bright numerical IDs at their center and boundary.
417
+
418
+ Your task is to generate **action-oriented descriptions** for each labeled {cat_name}.
419
+ Your descriptions should capture their **observable actions and interactions**, making sure to highlight movement, gestures, and dynamic behaviors.
420
+
421
+ ---
422
+ ## Key Guidelines:
423
+ 1. **Describe only clear and visible actions** that uniquely define what the {cat_name} is doing.
424
+ - Example: "grabbing a branch and pulling it down" (**(O) Specific**)
425
+ - Avoid: "moving slightly to the side" (**(X) Too vague**)
426
+
427
+ 2. **Do not describe appearance, color, or position**—focus purely on the action.
428
+ - (X) "A large brown bear standing on the left"
429
+ - (O) "The bear is lifting its front paws and swiping forward."
430
+
431
+ 3. **Use dynamic, action-specific verbs** rather than passive descriptions.
432
+ - (O) "The giraffe is tilting its head and sniffing the ground."
433
+ - (X) "The giraffe is near a tree and looking around."
434
+
435
+ 4. **Avoid assumptions, emotions, or speculative phrasing.**
436
+ - (X) "The person seems excited" / "The person might be preparing to jump."
437
+ - (O) "The person is pushing its front legs against the rock and leaping forward."
438
+
439
+ 5. **Avoid overly detailed or speculative descriptions** such as 'slightly moving its mouth' or 'appears to be anticipating'.
440
+ - expressions like 'seems to be', 'appears to be' are BANNED!
441
+ 6. Pretend you are observing the scene directly, avoiding phrases like 'it seems' or 'based on the description'.
442
+
443
+ 7. If multiple {cat_name}s are present, make sure their descriptions are **distinct and non-overlapping**.
444
+ - **Each object should have a unique, descriptive action.**
445
+ - (X) "Two dogs are running."
446
+ - (O) "1. One dog is chasing another, its legs stretched mid-air.
447
+ 2. The other dog is looking back while speeding up."
448
+
449
+ ---
450
+ ## Output Format:
451
+ - Each labeled **{cat_name}** should have exactly **one line of description**.
452
+ - Format: `ID. {cat_name} + action-based description`
453
+ - (O) Example:
454
+ ```
455
+ 1. The person is leaning forward while opening a bag with both hands.
456
+ 2. The person is holding onto a rope and pulling themselves up.
457
+ ```
458
+ - **Ensure that each object is described individually.**
459
+ - **Do not group objects into a single sentence** (e.g., "2-5. people: xxx" is NOT allowed).
460
+
461
+ ---
462
+ ## Additional Instructions:
463
+ - **Do NOT** use expressions like "it appears that..." or "it seems like...".
464
+ - **Do NOT** mention object IDs in the description (only use the provided format).
465
+ - **DO NOT** include markdown formatting (no bullet points, no asterisks).
466
+ - **Only describe actions of the labeled {cat_name} objects**—do not introduce unrelated categories.
467
+
468
+ Please generate the action-oriented descriptions for each labeled {cat_name} and start your answer:
469
+ """
470
+
471
+
472
+ dense_caption_prompt = f"""
473
+ You are a visual assistant analyzing a single frame of a video.
474
+ In this frame, {frame_cat_cnts} objects belonging to the category **{cat_name}** have been labeled with bright numeric IDs at their center and boundary.
475
+
476
+ I am building an **action-centric referring expression** dataset.
477
+ Your task is to describe each labeled {cat_name} based on **clearly observable and specific actions**.
478
+
479
+ ---
480
+ ## Guidelines:
481
+ 1. **Focus only on visible and prominent actions** (e.g., running, pushing, grasping an object).
482
+ 2. **Avoid describing minor or ambiguous movements** (e.g., "slightly moving a paw," "tilting head a bit").
483
+ 3. **Do not include subjective or speculative descriptions** (e.g., "it seems excited" or "it might be preparing to jump").
484
+ 4. **Avoid vague expressions** like "engaging with something." Instead, specify the action (e.g., "grabbing a stick," "pressing a button").
485
+ 5. **Use dynamic action verbs** (e.g., holding, throwing, inspecting, leaning, pressing) to highlight motion and interaction.
486
+ 6. If multiple {cat_name}s appear, ensure each description is **distinct and non-overlapping**.
487
+ 7. Base your descriptions on these principles:
488
+ - **Avoid words like 'minimal' or 'slightly'.**
489
+ - Emphasize **body movement, posture, and motion patterns** (e.g., "lifting its head," "facing forward," "showing its back").
490
+ - Describe **facial expressions and interactions with objects** (e.g., "opening its mouth wide," "smiling while holding an item").
491
+ - **Specify actions with other objects or entities** only when they are clear and observable.
492
+ - (O) "pushing another person"
493
+ - (X) "interacting with another object"
494
+
495
+ ---
496
+ ## Output Format:
497
+ - Each labeled **{cat_name}** must have **exactly one line**.
498
+ - Format: `ID. {cat_name} + action-based description`
499
+ - (O) Example:
500
+ ```
501
+ 1. The person is holding ski poles and skiing down a snowy mountain with bent knees.
502
+ 2. The person is pulling a baby carriage while smiling.
503
+ ```
504
+ - **Ensure each object is described individually.**
505
+ - **Do not group multiple objects into a single sentence** (e.g., "2-5. people: xxx" is NOT allowed).
506
+
507
+ ---
508
+ ## Example:
509
+ If the frame has two labeled **bears**, your output should be:
510
+ ```
511
+ 1. The bear is reaching out its right paw while leaning forward to catch prey.
512
+ 2. A bear is standing upright, facing right, and touching the bike beside it.
513
+ ```
514
+
515
+ ---
516
+ ## Additional Instructions:
517
+ - **Do NOT** describe appearance (e.g., color, size, texture) or relative positioning (e.g., "on the left/right").
518
+ - **Do NOT** reference object IDs explicitly (e.g., "Person 1" or "Object 2" is NOT allowed).
519
+ - **Do NOT** include markdown formatting (no bullet points, asterisks, or extra symbols).
520
+ - **Only describe actions of the labeled {cat_name} objects**—do not introduce unrelated categories.
521
+
522
+ Please generate the action-oriented descriptions for each labeled {cat_name} and start your answer:"""
523
+
524
+
525
+ MAX_RETRIES = 3
526
+ retry_count = 0
527
+
528
+ if should_caption:
529
+ while retry_count < MAX_RETRIES:
530
+ selected_prompt = random.choice([dense_caption_prompt, dense_caption_prompt_2])
531
+
532
+ response2 = captioner.chat.completions.create(
533
+ model=model,
534
+ messages=[
535
+ {
536
+ "role": "user",
537
+ "content": [
538
+ {
539
+ "type": "text",
540
+ "text": selected_prompt,
541
+ },
542
+ {
543
+ "type": "image_url",
544
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
545
+ },
546
+ ],
547
+ }
548
+ ],
549
+ )
550
+
551
+ # caption = response2.choices[0].message.content
552
+ #print(f"{image_path} - {frame_name}: {caption}")
553
+
554
+ caption = response2.choices[0].message.content.strip()
555
+ caption_lower = caption.lower().lstrip()
556
+
557
+ if caption_lower.startswith("1.") and not any(
558
+ phrase in caption_lower for phrase in ["i'm sorry", "please", "can't help"]
559
+ ):
560
+ break
561
+
562
+ print(f"Retrying caption generation... ({retry_count + 1}/{MAX_RETRIES})")
563
+ retry_count += 1
564
+ time.sleep(2)
565
+
566
+ if retry_count == MAX_RETRIES:
567
+ caption = None
568
+ print("Max retries reached. Caption generation failed.")
569
+
570
+ else:
571
+ caption = None
572
+
573
+ image_captions[frame_name] = caption
574
+ all_captions[cat_name] = image_captions
575
+
576
+ # final : also prepare valid object ids
577
+ valid_obj_ids = dict()
578
+
579
+ for cat in cat_names:
580
+ if cat in ytvos_category_valid_list:
581
+ obj_id_cat = vid_meta['obj_id_cat']
582
+ valid_cat_ids = []
583
+ for obj_id in list(obj_id_cat.keys()):
584
+ if obj_id_cat[obj_id] == cat:
585
+ valid_cat_ids.append(obj_id)
586
+ valid_obj_ids[cat] = valid_cat_ids
587
+
588
+ return vid_id, all_captions, valid_obj_ids
589
+
590
+
591
+ if __name__ == '__main__':
592
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
593
+ parser.add_argument('--save_caption_path', type=str, default="mbench/numbered_captions_gpt-4o_randcap.json")
594
+ parser.add_argument('--save_valid_obj_ids_path', type=str, default="mbench/numbered_valid_obj_ids_gpt-4o_randcap.json")
595
+
596
+ args = parser.parse_args()
597
+
598
+ #================== Load data ===================
599
+ # Full dataset
600
+ train_dataset = build_ytvos_ref(image_set = 'train', args = args)
601
+
602
+ # Metadata for the full dataset
603
+ metas = train_dataset.metas
604
+
605
+ # 8 candidate colors (RGB format)
606
+ colors = [
607
+ (255, 0, 0), # Red
608
+ (0, 255, 0), # Green
609
+ (0, 0, 255), # Blue
610
+ (255, 255, 0), # Yellow
611
+ (255, 0, 255), # Magenta
612
+ (0, 255, 255), # Cyan
613
+ (128, 0, 128), # Purple
614
+ (255, 165, 0) # Orange
615
+ ]
616
+
617
+ ytvos_category_valid_list = [
618
+ 'airplane', 'ape', 'bear', 'bird', 'boat', 'bus', 'camel', 'cat', 'cow', 'crocodile',
619
+ 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal', 'elephant', 'fish', 'fox', 'frog',
620
+ 'giant_panda', 'giraffe', 'hedgehog', 'horse', 'leopard', 'lion', 'lizard',
621
+ 'monkey', 'motorbike', 'mouse', 'owl', 'parrot', 'penguin', 'person',
622
+ 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'snail', 'snake',
623
+ 'squirrel', 'tiger', 'train', 'truck', 'turtle', 'whale', 'zebra'
624
+ ]
625
+
626
+ #================== Run GPT ===================
627
+ os.environ['OPENAI_API_KEY'] = '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'
628
+
629
+ result_captions = {}
630
+ result_valid_obj_ids = {}
631
+
632
+ for i in range(len(metas)):
633
+ try:
634
+ vid_id, all_captions, valid_obj_ids = getCaption(i)
635
+
636
+ if vid_id not in result_captions:
637
+ result_captions[vid_id] = all_captions
638
+ if vid_id not in result_valid_obj_ids:
639
+ result_valid_obj_ids[vid_id] = valid_obj_ids
640
+
641
+ except (requests.exceptions.ConnectionError, APIConnectionError) as e:
642
+ print(f"created caption until {i}", flush=True)
643
+
644
+ with open(args.save_caption_path, "w") as file:
645
+ json.dump(result_captions, file, indent=4)
646
+
647
+ with open(args.save_valid_obj_ids_path, "w") as file:
648
+ json.dump(result_valid_obj_ids, file, indent=4)
649
+
650
+ print("Finished!", flush=True)
651
+
652
+ with open(args.save_caption_path, "w") as file:
653
+ json.dump(result_captions, file, indent=4)
654
+
655
+ with open(args.save_valid_obj_ids_path, "w") as file:
656
+ json.dump(result_valid_obj_ids, file, indent=4)
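For reference, the acceptance test used in the retry loop above (a caption is kept only if it starts with "1." and contains none of the refusal phrases) can be read as a small standalone helper. This is a sketch only; `is_valid_caption` is a name introduced here for illustration and is not part of the committed script.

```python
def is_valid_caption(caption: str) -> bool:
    """Mirror of the retry-loop check: numbered output, no refusal phrases."""
    text = caption.strip().lower()
    refusal_phrases = ["i'm sorry", "please", "can't help"]
    return text.startswith("1.") and not any(p in text for p in refusal_phrases)

# Example: a refusal is rejected, a numbered caption list is accepted.
assert not is_valid_caption("I'm sorry, I can't help with that.")
assert is_valid_caption("1. The person is pulling a baby carriage while smiling.")
```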
.history/mbench/make_ref-ytvos_json_20250113182322.py ADDED
@@ -0,0 +1,100 @@
1
+ from datasets import build_dataset
2
+ import argparse
3
+ import opts
4
+
5
+ import sys
6
+ from pathlib import Path
7
+ import os
8
+ from os import path as osp
9
+ import io
10
+
11
+ import numpy as np
12
+ import pandas as pd
13
+ import regex as re
14
+ import json
15
+
16
+ import cv2
17
+ from PIL import Image, ImageDraw
18
+ import torch
19
+ from torchvision.transforms import functional as F
20
+
21
+ from skimage import measure # (pip install scikit-image)
22
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
23
+
24
+ import matplotlib.pyplot as plt
25
+ import matplotlib.patches as patches
26
+ from matplotlib.collections import PatchCollection
27
+ from matplotlib.patches import Rectangle
28
+
29
+
30
+ import ipywidgets as widgets
31
+ from IPython.display import display, clear_output
32
+
33
+ #================== Build JSON ===================
34
+ def createJson(train_dataset, metas):
35
+ entire_json = {}
36
+
37
+ # Initialization
38
+ data_idx = 0
39
+
40
+ while data_idx < 10:
41
+
42
+ # For a single video
43
+ video_data = {}
44
+ video_id = metas[data_idx]['video']
45
+ video_data['bins'] = metas[data_idx]['bins']
46
+ annotation_data = []
47
+ frame_names = []
48
+
49
+ while metas[data_idx]['video'] == video_id:
50
+
51
+ obj_id = metas[data_idx]['obj_id']
52
+ sample_id = metas[data_idx]['sample_id']
53
+ sample_frames_id = metas[data_idx]['sample_frames_id']
54
+ sample_frame_idx = sample_frames_id.index(sample_id)
55
+
56
+ frames = metas[data_idx]['frames']
57
+
58
+ frame_name = frames[sample_id]
59
+ cat_name = metas[data_idx]['category']
60
+
61
+ bbox = train_dataset[data_idx][1]['boxes'][sample_frame_idx, :]
62
+
63
+ obj_data = {obj_id: {
64
+ "category_name" : cat_name,
65
+ "bbox": bbox
66
+ }}
67
+
68
+
69
+ annotation_data.append(obj_data)
70
+
71
+ frame_names.append(frame_name)
72
+
73
+ data_idx += 1
74
+
75
+ video_data['annotations'] = annotation_data
76
+ video_data['frame_names'] = frame_names
77
+ video_data['video_path'] = os.path.join(str(train_dataset.img_folder), 'JPEGImages', video_id)
78
+
79
+ entire_json[video_id] = video_data
80
+
81
+ return entire_json
82
+
83
+
84
+ if __name__ == '__main__':
85
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
86
+ args = parser.parse_args()
87
+
88
+ #================== Load data ===================
89
+ # Full dataset
90
+ train_dataset = build_dataset('ytvos_ref', image_set = 'train', args = args)
91
+
92
+ # Metadata for the full dataset
93
+ metas = train_dataset.metas
94
+
95
+ #================== Build JSON ===================
96
+ entire_json_dict = createJson(train_dataset, metas)
97
+ entire_json = json.dumps(entire_json_dict, indent=4)
98
+
99
+ with open('mbench/sampled_frame.json', mode='w') as file:
100
+ file.write(entire_json)
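One caveat about this snapshot: the `bbox` stored in `obj_data` is still a torch tensor slice, and `json.dumps` cannot serialize tensors, so the dump at the end would fail as written; the 20250118 snapshot later in this commit converts with `.tolist()`. A minimal self-contained illustration of that conversion:

```python
import json

import torch

box = torch.tensor([10.0, 20.0, 110.0, 220.0])  # xyxy box, as produced by the dataset
# json.dumps cannot serialize a torch.Tensor directly, so convert it to a plain list first.
print(json.dumps({"bbox": box.tolist()}, indent=4))
```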
.history/mbench/make_ref-ytvos_json_20250113182734.py ADDED
@@ -0,0 +1,102 @@
1
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
2
+
3
+ from datasets import build_dataset
4
+ import argparse
5
+ import opts
6
+
7
+ import sys
8
+ from pathlib import Path
9
+ import os
10
+ from os import path as osp
11
+ import io
12
+
13
+ import numpy as np
14
+ import pandas as pd
15
+ import regex as re
16
+ import json
17
+
18
+ import cv2
19
+ from PIL import Image, ImageDraw
20
+ import torch
21
+ from torchvision.transforms import functional as F
22
+
23
+ from skimage import measure # (pip install scikit-image)
24
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
25
+
26
+ import matplotlib.pyplot as plt
27
+ import matplotlib.patches as patches
28
+ from matplotlib.collections import PatchCollection
29
+ from matplotlib.patches import Rectangle
30
+
31
+
32
+ import ipywidgets as widgets
33
+ from IPython.display import display, clear_output
34
+
35
+ #================== Build JSON ===================
36
+ def createJson(train_dataset, metas):
37
+ entire_json = {}
38
+
39
+ # Initialization
40
+ data_idx = 0
41
+
42
+ while data_idx < 10:
43
+
44
+ # For a single video
45
+ video_data = {}
46
+ video_id = metas[data_idx]['video']
47
+ video_data['bins'] = metas[data_idx]['bins']
48
+ annotation_data = []
49
+ frame_names = []
50
+
51
+ while metas[data_idx]['video'] == video_id:
52
+
53
+ obj_id = metas[data_idx]['obj_id']
54
+ sample_id = metas[data_idx]['sample_id']
55
+ sample_frames_id = metas[data_idx]['sample_frames_id']
56
+ sample_frame_idx = sample_frames_id.index(sample_id)
57
+
58
+ frames = metas[data_idx]['frames']
59
+
60
+ frame_name = frames[sample_id]
61
+ cat_name = metas[data_idx]['category']
62
+
63
+ bbox = train_dataset[data_idx][1]['boxes'][sample_frame_idx, :]
64
+
65
+ obj_data = {obj_id: {
66
+ "category_name" : cat_name,
67
+ "bbox": bbox
68
+ }}
69
+
70
+
71
+ annotation_data.append(obj_data)
72
+
73
+ frame_names.append(frame_name)
74
+
75
+ data_idx += 1
76
+
77
+ video_data['annotations'] = annotation_data
78
+ video_data['frame_names'] = frame_names
79
+ video_data['video_path'] = os.path.join(str(train_dataset.img_folder), 'JPEGImages', video_id)
80
+
81
+ entire_json[video_id] = video_data
82
+
83
+ return entire_json
84
+
85
+
86
+ if __name__ == '__main__':
87
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
88
+ args = parser.parse_args()
89
+
90
+ #================== Load data ===================
91
+ # Full dataset
92
+ train_dataset = build_dataset('ytvos_ref', image_set = 'train', args = args)
93
+
94
+ # Metadata for the full dataset
95
+ metas = train_dataset.metas
96
+
97
+ #================== Build JSON ===================
98
+ entire_json_dict = createJson(train_dataset, metas)
99
+ entire_json = json.dumps(entire_json_dict, indent=4)
100
+
101
+ with open('mbench/sampled_frame.json', mode='w') as file:
102
+ file.write(entire_json)
.history/mbench/make_ref-ytvos_json_20250113182817.py ADDED
@@ -0,0 +1,103 @@
1
+ import sys
2
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
3
+
4
+ from datasets import build_dataset
5
+ import argparse
6
+ import opts
7
+
8
+
9
+ from pathlib import Path
10
+ import os
11
+ from os import path as osp
12
+ import io
13
+
14
+ import numpy as np
15
+ import pandas as pd
16
+ import regex as re
17
+ import json
18
+
19
+ import cv2
20
+ from PIL import Image, ImageDraw
21
+ import torch
22
+ from torchvision.transforms import functional as F
23
+
24
+ from skimage import measure # (pip install scikit-image)
25
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
26
+
27
+ import matplotlib.pyplot as plt
28
+ import matplotlib.patches as patches
29
+ from matplotlib.collections import PatchCollection
30
+ from matplotlib.patches import Rectangle
31
+
32
+
33
+ import ipywidgets as widgets
34
+ from IPython.display import display, clear_output
35
+
36
+ #================== Build JSON ===================
37
+ def createJson(train_dataset, metas):
38
+ entire_json = {}
39
+
40
+ # Initialization
41
+ data_idx = 0
42
+
43
+ while data_idx < 10:
44
+
45
+ # For a single video
46
+ video_data = {}
47
+ video_id = metas[data_idx]['video']
48
+ video_data['bins'] = metas[data_idx]['bins']
49
+ annotation_data = []
50
+ frame_names = []
51
+
52
+ while metas[data_idx]['video'] == video_id:
53
+
54
+ obj_id = metas[data_idx]['obj_id']
55
+ sample_id = metas[data_idx]['sample_id']
56
+ sample_frames_id = metas[data_idx]['sample_frames_id']
57
+ sample_frame_idx = sample_frames_id.index(sample_id)
58
+
59
+ frames = metas[data_idx]['frames']
60
+
61
+ frame_name = frames[sample_id]
62
+ cat_name = metas[data_idx]['category']
63
+
64
+ bbox = train_dataset[data_idx][1]['boxes'][sample_frame_idx, :]
65
+
66
+ obj_data = {obj_id: {
67
+ "category_name" : cat_name,
68
+ "bbox": bbox
69
+ }}
70
+
71
+
72
+ annotation_data.append(obj_data)
73
+
74
+ frame_names.append(frame_name)
75
+
76
+ data_idx += 1
77
+
78
+ video_data['annotations'] = annotation_data
79
+ video_data['frame_names'] = frame_names
80
+ video_data['video_path'] = os.path.join(str(train_dataset.img_folder), 'JPEGImages', video_id)
81
+
82
+ entire_json[video_id] = video_data
83
+
84
+ return entire_json
85
+
86
+
87
+ if __name__ == '__main__':
88
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
89
+ args = parser.parse_args()
90
+
91
+ #================== Load data ===================
92
+ # Full dataset
93
+ train_dataset = build_dataset('ytvos_ref', image_set = 'train', args = args)
94
+
95
+ # Metadata for the full dataset
96
+ metas = train_dataset.metas
97
+
98
+ #================== Build JSON ===================
99
+ entire_json_dict = createJson(train_dataset, metas)
100
+ entire_json = json.dumps(entire_json_dict, indent=4)
101
+
102
+ with open('mbench/sampled_frame.json', mode='w') as file:
103
+ file.write(entire_json)
.history/mbench/make_ref-ytvos_json_20250113182842.py ADDED
@@ -0,0 +1,102 @@
1
+ import sys
2
+ from os import path as osp
3
+ sys.path.append(os.path.abspath(osp.join(osp.dirname(__file__), '..')))
4
+
5
+ from datasets import build_dataset
6
+ import argparse
7
+ import opts
8
+
9
+
10
+ from pathlib import Path
11
+ import io
12
+
13
+ import numpy as np
14
+ import pandas as pd
15
+ import regex as re
16
+ import json
17
+
18
+ import cv2
19
+ from PIL import Image, ImageDraw
20
+ import torch
21
+ from torchvision.transforms import functional as F
22
+
23
+ from skimage import measure # (pip install scikit-image)
24
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
25
+
26
+ import matplotlib.pyplot as plt
27
+ import matplotlib.patches as patches
28
+ from matplotlib.collections import PatchCollection
29
+ from matplotlib.patches import Rectangle
30
+
31
+
32
+ import ipywidgets as widgets
33
+ from IPython.display import display, clear_output
34
+
35
+ #================== Build JSON ===================
36
+ def createJson(train_dataset, metas):
37
+ entire_json = {}
38
+
39
+ # Initialization
40
+ data_idx = 0
41
+
42
+ while data_idx < 10:
43
+
44
+ # For a single video
45
+ video_data = {}
46
+ video_id = metas[data_idx]['video']
47
+ video_data['bins'] = metas[data_idx]['bins']
48
+ annotation_data = []
49
+ frame_names = []
50
+
51
+ while metas[data_idx]['video'] == video_id:
52
+
53
+ obj_id = metas[data_idx]['obj_id']
54
+ sample_id = metas[data_idx]['sample_id']
55
+ sample_frames_id = metas[data_idx]['sample_frames_id']
56
+ sample_frame_idx = sample_frames_id.index(sample_id)
57
+
58
+ frames = metas[data_idx]['frames']
59
+
60
+ frame_name = frames[sample_id]
61
+ cat_name = metas[data_idx]['category']
62
+
63
+ bbox = train_dataset[data_idx][1]['boxes'][sample_frame_idx, :]
64
+
65
+ obj_data = {obj_id: {
66
+ "category_name" : cat_name,
67
+ "bbox": bbox
68
+ }}
69
+
70
+
71
+ annotation_data.append(obj_data)
72
+
73
+ frame_names.append(frame_name)
74
+
75
+ data_idx += 1
76
+
77
+ video_data['annotations'] = annotation_data
78
+ video_data['frame_names'] = frame_names
79
+ video_data['video_path'] = os.path.join(str(train_dataset.img_folder), 'JPEGImages', video_id)
80
+
81
+ entire_json[video_id] = video_data
82
+
83
+ return entire_json
84
+
85
+
86
+ if __name__ == '__main__':
87
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
88
+ args = parser.parse_args()
89
+
90
+ #================== Load data ===================
91
+ # Full dataset
92
+ train_dataset = build_dataset('ytvos_ref', image_set = 'train', args = args)
93
+
94
+ # Metadata for the full dataset
95
+ metas = train_dataset.metas
96
+
97
+ #================== Build JSON ===================
98
+ entire_json_dict = createJson(train_dataset, metas)
99
+ entire_json = json.dumps(entire_json_dict, indent=4)
100
+
101
+ with open('mbench/sampled_frame.json', mode='w') as file:
102
+ file.write(entire_json)
.history/mbench/make_ref-ytvos_json_20250113183130.py ADDED
@@ -0,0 +1,102 @@
1
+ import sys
2
+ from os import path as osp
3
+ sys.path.append(osp.abspath(osp.join(osp.dirname(__file__), '..')))
4
+
5
+ from datasets import build_dataset
6
+ import argparse
7
+ import opts
8
+
9
+
10
+ from pathlib import Path
11
+ import io
12
+
13
+ import numpy as np
14
+ import pandas as pd
15
+ import regex as re
16
+ import json
17
+
18
+ import cv2
19
+ from PIL import Image, ImageDraw
20
+ import torch
21
+ from torchvision.transforms import functional as F
22
+
23
+ from skimage import measure # (pip install scikit-image)
24
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
25
+
26
+ import matplotlib.pyplot as plt
27
+ import matplotlib.patches as patches
28
+ from matplotlib.collections import PatchCollection
29
+ from matplotlib.patches import Rectangle
30
+
31
+
32
+ import ipywidgets as widgets
33
+ from IPython.display import display, clear_output
34
+
35
+ #================== Build JSON ===================
36
+ def createJson(train_dataset, metas):
37
+ entire_json = {}
38
+
39
+ # Initialization
40
+ data_idx = 0
41
+
42
+ while data_idx < 10:
43
+
44
+ # For a single video
45
+ video_data = {}
46
+ video_id = metas[data_idx]['video']
47
+ video_data['bins'] = metas[data_idx]['bins']
48
+ annotation_data = []
49
+ frame_names = []
50
+
51
+ while metas[data_idx]['video'] == video_id:
52
+
53
+ obj_id = metas[data_idx]['obj_id']
54
+ sample_id = metas[data_idx]['sample_id']
55
+ sample_frames_id = metas[data_idx]['sample_frames_id']
56
+ sample_frame_idx = sample_frames_id.index(sample_id)
57
+
58
+ frames = metas[data_idx]['frames']
59
+
60
+ frame_name = frames[sample_id]
61
+ cat_name = metas[data_idx]['category']
62
+
63
+ bbox = train_dataset[data_idx][1]['boxes'][sample_frame_idx, :]
64
+
65
+ obj_data = {obj_id: {
66
+ "category_name" : cat_name,
67
+ "bbox": bbox
68
+ }}
69
+
70
+
71
+ annotation_data.append(obj_data)
72
+
73
+ frame_names.append(frame_name)
74
+
75
+ data_idx += 1
76
+
77
+ video_data['annotations'] = annotation_data
78
+ video_data['frame_names'] = frame_names
79
+ video_data['video_path'] = osp.join(str(train_dataset.img_folder), 'JPEGImages', video_id)
80
+
81
+ entire_json[video_id] = video_data
82
+
83
+ return entire_json
84
+
85
+
86
+ if __name__ == '__main__':
87
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
88
+ args = parser.parse_args()
89
+
90
+ #================== Load data ===================
91
+ # Full dataset
92
+ train_dataset = build_dataset('ytvos_ref', image_set = 'train', args = args)
93
+
94
+ # Metadata for the full dataset
95
+ metas = train_dataset.metas
96
+
97
+ #================== Build JSON ===================
98
+ entire_json_dict = createJson(train_dataset, metas)
99
+ entire_json = json.dumps(entire_json_dict, indent=4)
100
+
101
+ with open('mbench/sampled_frame.json', mode='w') as file:
102
+ file.write(entire_json)
.history/mbench/make_ref-ytvos_json_20250116141513.py ADDED
@@ -0,0 +1,103 @@
1
+ import sys
2
+ from os import path as osp
3
+ sys.path.append(osp.abspath(osp.join(osp.dirname(__file__), '..')))
4
+
5
+ from datasets import build_dataset
6
+ import argparse
7
+ import opts
8
+
9
+
10
+ from pathlib import Path
11
+ import io
12
+
13
+ import numpy as np
14
+ import pandas as pd
15
+ import regex as re
16
+ import json
17
+
18
+ import cv2
19
+ from PIL import Image, ImageDraw
20
+ import torch
21
+ from torchvision.transforms import functional as F
22
+
23
+ from skimage import measure # (pip install scikit-image)
24
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
25
+
26
+ import matplotlib.pyplot as plt
27
+ import matplotlib.patches as patches
28
+ from matplotlib.collections import PatchCollection
29
+ from matplotlib.patches import Rectangle
30
+
31
+
32
+ import ipywidgets as widgets
33
+ from IPython.display import display, clear_output
34
+
35
+ #================== Build JSON ===================
36
+ def createJson(train_dataset, metas):
37
+ entire_json = {}
38
+
39
+ # Initialization
40
+ vid_idx = 0
41
+
42
+ while vid_idx < len(train_dataset):
43
+
44
+ # For a single video
45
+ video_data = {}
46
+ video_train_frames, video_train_info = train_dataset[vid_idx]
47
+ video_meta = metas[vid_idx]
48
+
49
+ video_id = video_meta['video']
50
+ video_data['bins'] = video_meta['bins']
51
+ bin_nums = len(video_meta['bins'])
52
+ obj_nums = len(list(video_meta['obj_id_cat'].keys()))
53
+
54
+ annotation_data = []
55
+ frame_names = []
56
+
57
+ for i in range(bin_nums):
58
+ bin_data = {}
59
+ for j in range(obj_nums):
60
+ obj_id = str(j+1)
61
+ obj_data = {
62
+ "category_name":video_meta['obj_id_cat'][obj_id],
63
+ "bbox":video_train_info['boxes'][i*obj_nums+j, :]
64
+ }
65
+ bin_data[obj_id] = obj_data
66
+ annotation_data.append(bin_data)
67
+
68
+ video_data['annotations'] = annotation_data
69
+
70
+
71
+ sample_indx = metas[vid_idx]['sample_indx']
72
+ frames = metas[vid_idx]['frames']
73
+ for i in sample_indx:
74
+ frame_name = frames[i]
75
+ frame_names.append(frame_name)
76
+
77
+ video_data['frame_names'] = frame_names
78
+ video_data['video_path'] = os.path.join(str(train_dataset.img_folder), 'JPEGImages', video_id)
79
+ entire_json[video_id] = video_data
80
+
81
+ vid_idx += 1
82
+
83
+ return entire_json
84
+
85
+
86
+ if __name__ == '__main__':
87
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
88
+ args = parser.parse_args()
89
+
90
+ #================== Load data ===================
91
+ # Full dataset
92
+ train_dataset = build_dataset('ytvos_ref', image_set = 'train', args = args)
93
+
94
+ # Metadata for the full dataset
95
+ metas = train_dataset.metas
96
+
97
+ #================== Build JSON ===================
98
+ entire_json_dict = createJson(train_dataset, metas)
99
+ print(type(entire_json_dict))
100
+ entire_json = json.dumps(entire_json_dict, indent=4)
101
+
102
+ with open('mbench/sampled_frame.json', mode='w') as file:
103
+ file.write(entire_json)
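The indexing `video_train_info['boxes'][i*obj_nums+j, :]` above assumes the box tensor is stacked frame-major: all objects of the first sampled frame, then all objects of the second, and so on, which matches the per-frame, per-object append order in the ytvos_ref loader included later in this commit. A tiny self-contained sketch of that flat-index convention; `flat_index` is a name used here only for illustration:

```python
def flat_index(bin_idx: int, obj_idx: int, obj_nums: int) -> int:
    """Row of the stacked boxes tensor for a (sampled frame, object) pair."""
    return bin_idx * obj_nums + obj_idx

assert flat_index(0, 0, obj_nums=3) == 0  # first sampled frame, object id "1"
assert flat_index(2, 1, obj_nums=3) == 7  # third sampled frame, object id "2"
```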
.history/mbench/make_ref-ytvos_json_20250118024325.py ADDED
@@ -0,0 +1,108 @@
1
+ import sys
2
+ import os
3
+ from os import path as osp
4
+ sys.path.append(osp.abspath(osp.join(osp.dirname(__file__), '..')))
5
+
6
+ from datasets import build_dataset
7
+ import argparse
8
+ import opts
9
+
10
+
11
+ from pathlib import Path
12
+ import io
13
+
14
+ import numpy as np
15
+ import pandas as pd
16
+ import regex as re
17
+ import json
18
+
19
+ import cv2
20
+ from PIL import Image, ImageDraw
21
+ import torch
22
+ from torchvision.transforms import functional as F
23
+
24
+ from skimage import measure # (pip install scikit-image)
25
+ from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
26
+
27
+ import matplotlib.pyplot as plt
28
+ import matplotlib.patches as patches
29
+ from matplotlib.collections import PatchCollection
30
+ from matplotlib.patches import Rectangle
31
+
32
+
33
+ import ipywidgets as widgets
34
+ from IPython.display import display, clear_output
35
+
36
+ #================== Build JSON ===================
37
+ def createJson(train_dataset, metas):
38
+ entire_json = {}
39
+
40
+ # Initialization
41
+ vid_idx = 0
42
+
43
+ while vid_idx < len(train_dataset):
44
+
45
+ # For a single video
46
+ video_data = {}
47
+ video_train_frames, video_train_info = train_dataset[vid_idx]
48
+ video_meta = metas[vid_idx]
49
+
50
+ video_id = video_meta['video']
51
+ video_data['bins'] = video_meta['bins']
52
+ bin_nums = len(video_meta['bins'])
53
+ obj_nums = max([int(k) for k in list(video_meta['obj_id_cat'].keys())])
54
+
55
+ annotation_data = []
56
+ frame_names = []
57
+
58
+ for i in range(bin_nums):
59
+ bin_data = {}
60
+ for j in range(obj_nums):
61
+ obj_id = str(j+1)
62
+ try:
63
+ obj_data = {
64
+ "category_name":video_meta['obj_id_cat'][obj_id],
65
+ "bbox":video_train_info['boxes'][i*obj_nums+j, :].tolist(),
66
+ "valid":video_train_info['valid'][i*obj_nums+j].item()
67
+ }
68
+ except:
69
+ obj_data = {}
70
+ bin_data[obj_id] = obj_data
71
+ annotation_data.append(bin_data)
72
+
73
+ video_data['annotations'] = annotation_data
74
+
75
+
76
+ sample_indx = metas[vid_idx]['sample_indx']
77
+ frames = metas[vid_idx]['frames']
78
+ for i in sample_indx:
79
+ frame_name = frames[i]
80
+ frame_names.append(frame_name)
81
+
82
+ video_data['frame_names'] = frame_names
83
+ video_data['video_path'] = os.path.join(str(train_dataset.img_folder), 'JPEGImages', video_id)
84
+ entire_json[video_id] = video_data
85
+
86
+ vid_idx += 1
87
+
88
+ return entire_json
89
+
90
+
91
+ if __name__ == '__main__':
92
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
93
+ args = parser.parse_args()
94
+
95
+ #================== Load data ===================
96
+ # Full dataset
97
+ train_dataset = build_dataset('ytvos_ref', image_set = 'train', args = args)
98
+
99
+ # Metadata for the full dataset
100
+ metas = train_dataset.metas
101
+
102
+ #================== Build JSON ===================
103
+ entire_json_dict = createJson(train_dataset, metas)
104
+ print(type(entire_json_dict))
105
+ entire_json = json.dumps(entire_json_dict, indent=4)
106
+
107
+ with open('mbench/sampled_frame2.json', mode='w') as file:
108
+ file.write(entire_json)
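For completeness, this is roughly how the file written above can be read back; the schema (bins, one `{obj_id: {category_name, bbox, valid}}` dictionary per sampled bin, frame names, video path) is inferred from `createJson`, and the hard-coded path is the one used in this snapshot.

```python
import json

with open('mbench/sampled_frame2.json') as f:
    sampled = json.load(f)

for video_id, video_data in sampled.items():
    # one {obj_id: {...}} dict per sampled bin; empty dicts mark objects the
    # try/except above could not annotate
    for bin_idx, bin_data in enumerate(video_data['annotations']):
        for obj_id, obj in bin_data.items():
            if obj:
                print(video_id, bin_idx, obj_id, obj['category_name'], obj['valid'])
```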
.history/mbench/ytvos_ref_20250121152309.py ADDED
@@ -0,0 +1,264 @@
1
+ """
2
+ Ref-YoutubeVOS data loader
3
+ """
4
+ from pathlib import Path
5
+
6
+ import torch
7
+ from torch.utils.data import Dataset
8
+
9
+ import os
10
+ from PIL import Image
11
+ import json
12
+ import numpy as np
13
+ import random
14
+
15
+ # from datasets.categories import ytvos_category_dict as category_dict
16
+
17
+
18
+ category_dict = {
19
+ 'airplane': 0, 'ape': 1, 'bear': 2, 'bike': 3, 'bird': 4, 'boat': 5, 'bucket': 6, 'bus': 7, 'camel': 8, 'cat': 9,
20
+ 'cow': 10, 'crocodile': 11, 'deer': 12, 'dog': 13, 'dolphin': 14, 'duck': 15, 'eagle': 16, 'earless_seal': 17,
21
+ 'elephant': 18, 'fish': 19, 'fox': 20, 'frisbee': 21, 'frog': 22, 'giant_panda': 23, 'giraffe': 24, 'hand': 25,
22
+ 'hat': 26, 'hedgehog': 27, 'horse': 28, 'knife': 29, 'leopard': 30, 'lion': 31, 'lizard': 32, 'monkey': 33,
23
+ 'motorbike': 34, 'mouse': 35, 'others': 36, 'owl': 37, 'paddle': 38, 'parachute': 39, 'parrot': 40, 'penguin': 41,
24
+ 'person': 42, 'plant': 43, 'rabbit': 44, 'raccoon': 45, 'sedan': 46, 'shark': 47, 'sheep': 48, 'sign': 49,
25
+ 'skateboard': 50, 'snail': 51, 'snake': 52, 'snowboard': 53, 'squirrel': 54, 'surfboard': 55, 'tennis_racket': 56,
26
+ 'tiger': 57, 'toilet': 58, 'train': 59, 'truck': 60, 'turtle': 61, 'umbrella': 62, 'whale': 63, 'zebra': 64
27
+ }
28
+
29
+
30
+
31
+ class YTVOSDataset(Dataset):
32
+ """
33
+ A dataset class for the Refer-Youtube-VOS dataset which was first introduced in the paper:
34
+ "URVOS: Unified Referring Video Object Segmentation Network with a Large-Scale Benchmark"
35
+ (see https://link.springer.com/content/pdf/10.1007/978-3-030-58555-6_13.pdf).
36
+ The original release of the dataset contained both 'first-frame' and 'full-video' expressions. However, the first
37
+ dataset is not publicly available anymore as now only the harder 'full-video' subset is available to download
38
+ through the Youtube-VOS referring video object segmentation competition page at:
39
+ https://competitions.codalab.org/competitions/29139
40
+ Furthermore, for the competition the subset's original validation set, which consists of 507 videos, was split into
41
+ two competition 'validation' & 'test' subsets, consisting of 202 and 305 videos respectively. Evaluation can
42
+ currently only be done on the competition 'validation' subset using the competition's server, as
43
+ annotations were publicly released only for the 'train' subset of the competition.
44
+
45
+ """
46
+ def __init__(self, img_folder: Path, ann_file: Path, transforms, return_masks: bool,
47
+ num_frames: int, max_skip: int):
48
+ self.img_folder = img_folder
49
+ self.ann_file = ann_file
50
+ self._transforms = transforms
51
+ self.return_masks = return_masks # not used
52
+ self.num_frames = num_frames
53
+ self.max_skip = max_skip
54
+ # create video meta data
55
+ self.prepare_metas()
56
+
57
+ print('\n video num: ', len(self.videos), ' clip num: ', len(self.metas))
58
+ print('\n')
59
+
60
+ def prepare_metas(self):
61
+ # read object information
62
+ with open(os.path.join(str(self.img_folder), 'meta.json'), 'r') as f:
63
+ subset_metas_by_video = json.load(f)['videos']
64
+
65
+ # read expression data
66
+ with open(str(self.ann_file), 'r') as f:
67
+ subset_expressions_by_video = json.load(f)['videos']
68
+ self.videos = list(subset_expressions_by_video.keys())
69
+
70
+ self.metas = []
71
+ skip_vid_count = 0
72
+
73
+ for vid in self.videos:
74
+ vid_meta = subset_metas_by_video[vid]
75
+ vid_data = subset_expressions_by_video[vid]
76
+ vid_frames = sorted(vid_data['frames'])
77
+ vid_len = len(vid_frames)
78
+
79
+ if vid_len < 11:
80
+ #print(f"Too short video: {vid} with frame length {vid_len}")
81
+ skip_vid_count += 1
82
+ continue
83
+
84
+
85
+ # Exclude start_idx (0, 1) and end_idx (vid_len-1, vid_len-2)
86
+ start_idx , end_idx = 2, vid_len-2
87
+ bin_size = (end_idx - start_idx) // 4
88
+
89
+ bins = []
90
+ for i in range(4):
91
+ bin_start = start_idx + i * bin_size
92
+ bin_end = bin_start + bin_size if i < 3 else end_idx
93
+
94
+ bins.append((bin_start, bin_end))
95
+
96
+ # Random sample one frame from each bin
97
+ sample_indx = []
98
+ for start_idx, end_idx in bins:
99
+ sample_indx.append(random.randint(start_idx, end_idx - 1))
100
+ sample_indx.sort() # Ensure indices are in order
101
+
102
+
103
+ meta = {
104
+ 'video':vid,
105
+ 'sample_indx':sample_indx,
106
+ 'bins':bins,
107
+ 'frames':vid_frames
108
+ }
109
+ obj_id_cat = {}
110
+ for exp_id, exp_dict in vid_data['expressions'].items():
111
+ obj_id = exp_dict['obj_id']
112
+ if obj_id not in obj_id_cat:
113
+ obj_id_cat[obj_id] = vid_meta['objects'][obj_id]['category']
114
+ meta['obj_id_cat'] = obj_id_cat
115
+ self.metas.append(meta)
116
+
117
+ print(f"skipped {skip_vid_count} short videos")
118
+
119
+
120
+ @staticmethod
121
+ def bounding_box(img):
122
+ rows = np.any(img, axis=1)
123
+ cols = np.any(img, axis=0)
124
+ rmin, rmax = np.where(rows)[0][[0, -1]]
125
+ cmin, cmax = np.where(cols)[0][[0, -1]]
126
+ return rmin, rmax, cmin, cmax # y1, y2, x1, x2
127
+
128
+ def __len__(self):
129
+ return len(self.metas)
130
+
131
+ def __getitem__(self, idx):
132
+ meta = self.metas[idx] # dict
133
+
134
+ video, sample_indx, bins, frames, obj_id_cat = \
135
+ meta['video'], meta['sample_indx'], meta['bins'], meta['frames'], meta['obj_id_cat']
136
+
137
+ # read frames and masks
138
+ annos = {}
139
+ imgs, labels, boxes, masks, valid = [], [], [], [], []
140
+ for frame_indx in sample_indx:
141
+ frame_name = frames[frame_indx]
142
+ img_path = os.path.join(str(self.img_folder), 'JPEGImages', video, frame_name + '.jpg')
143
+ mask_path = os.path.join(str(self.img_folder), 'Annotations', video, frame_name + '.png')
144
+ img = Image.open(img_path).convert('RGB')
145
+ imgs.append(img)
146
+
147
+ mask = Image.open(mask_path).convert('P')
148
+ mask = np.array(mask)
149
+
150
+ frame_annotations = {}
151
+
152
+ # create the target
153
+ for obj_id in list(obj_id_cat.keys()):
154
+ obj_mask = (mask==int(obj_id)).astype(np.float32) # 0,1 binary
155
+ if (obj_mask > 0).any():
156
+ y1, y2, x1, x2 = self.bounding_box(obj_mask)
157
+ box = torch.tensor([x1, y1, x2, y2]).to(torch.float)
158
+ valid.append(1)
159
+ val = 1
160
+ else: # some frame didn't contain the instance
161
+ box = torch.tensor([0, 0, 0, 0]).to(torch.float)
162
+ valid.append(0)
163
+ val = 0
164
+ obj_mask = torch.from_numpy(obj_mask)
165
+
166
+ # append
167
+ masks.append(obj_mask)
168
+ boxes.append(box)
169
+
170
+ frame_annotations[obj_id] = {
171
+ 'category_name': obj_id_cat[obj_id],
172
+ 'bbox': box,
173
+ 'valid' : val,
174
+ 'mask': obj_mask
175
+ }
176
+
177
+ annos[frame_indx] = frame_annotations
178
+
179
+
180
+ # transform
181
+ w, h = img.size
182
+ boxes = torch.stack(boxes, dim=0)
183
+ boxes[:, 0::2].clamp_(min=0, max=w)
184
+ boxes[:, 1::2].clamp_(min=0, max=h)
185
+ masks = torch.stack(masks, dim=0)
186
+ target = {
187
+ 'frames_idx': sample_indx, # [T,]
188
+ 'boxes': boxes, # [T, 4], xyxy
189
+ 'masks': masks, # [T, H, W]
190
+ 'valid': torch.tensor(valid), # [T,]
191
+ 'obj_ids' : list(obj_id_cat.keys()),
192
+ 'orig_size': torch.as_tensor([int(h), int(w)]),
193
+ 'size': torch.as_tensor([int(h), int(w)])
194
+ }
195
+
196
+ # "boxes" normalize to [0, 1] and transform from xyxy to cxcywh in self._transform
197
+ # if self._transforms:
198
+ # imgs, target = self._transforms(imgs, target)
199
+ # imgs = torch.stack(imgs, dim=0) # [T, 3, H, W]
200
+ # else:
201
+ imgs = np.array(imgs)
202
+ imgs = torch.tensor(imgs.transpose(0, 3, 1, 2))
203
+
204
+
205
+ # # FIXME: handle "valid", since some box may be removed due to random crop
206
+ # if torch.any(target['valid'] == 1): # at leatst one instance
207
+ # instance_check = True
208
+ # else:
209
+ # idx = random.randint(0, self.__len__() - 1)
210
+
211
+ return imgs, target, annos
212
+
213
+
214
+ def make_coco_transforms(image_set, max_size=640):
215
+ normalize = T.Compose([
216
+ T.ToTensor(),
217
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
218
+ ])
219
+
220
+ scales = [288, 320, 352, 392, 416, 448, 480, 512]
221
+
222
+ if image_set == 'train':
223
+ return T.Compose([
224
+ T.RandomHorizontalFlip(),
225
+ T.PhotometricDistort(),
226
+ T.RandomSelect(
227
+ T.Compose([
228
+ T.RandomResize(scales, max_size=max_size),
229
+ T.Check(),
230
+ ]),
231
+ T.Compose([
232
+ T.RandomResize([400, 500, 600]),
233
+ T.RandomSizeCrop(384, 600),
234
+ T.RandomResize(scales, max_size=max_size),
235
+ T.Check(),
236
+ ])
237
+ ),
238
+ normalize,
239
+ ])
240
+
241
+ # we do not use the 'val' set since the annotations are inaccessible
242
+ if image_set == 'val':
243
+ return T.Compose([
244
+ T.RandomResize([360], max_size=640),
245
+ normalize,
246
+ ])
247
+
248
+ raise ValueError(f'unknown {image_set}')
249
+
250
+
251
+ def build(image_set, args):
252
+ root = Path(args.ytvos_path)
253
+ assert root.exists(), f'provided YTVOS path {root} does not exist'
254
+ PATHS = {
255
+ "train": (root / "train", root / "meta_expressions" / "train" / "meta_expressions.json"),
256
+ "val": (root / "valid", root / "meta_expressions" / "valid" / "meta_expressions.json"), # not used actually
257
+ }
258
+ img_folder, ann_file = PATHS[image_set]
259
+ # dataset = YTVOSDataset(img_folder, ann_file, transforms=make_coco_transforms(image_set, max_size=args.max_size), return_masks=args.masks,
260
+ # num_frames=args.num_frames, max_skip=args.max_skip)
261
+ dataset = YTVOSDataset(img_folder, ann_file, transforms=None, return_masks=args.masks,
262
+ num_frames=args.num_frames, max_skip=args.max_skip)
263
+ return dataset
264
+
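A self-contained check of the `bounding_box` helper defined in this loader: it returns `(rmin, rmax, cmin, cmax)`, i.e. y1, y2, x1, x2 of the non-zero region of a binary mask, which is how the per-object boxes above are derived. The synthetic mask below is purely illustrative.

```python
import numpy as np

mask = np.zeros((6, 8), dtype=np.float32)
mask[2:5, 3:7] = 1.0  # a 3x4 blob of ones

rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
assert (rmin, rmax, cmin, cmax) == (2, 4, 3, 6)  # y1, y2, x1, x2
```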
.history/mbench_a2d/gpt_a2d_numbered_20250205111640.py ADDED
@@ -0,0 +1,82 @@
1
+ from datasets import build_dataset
2
+ import argparse
3
+ import opts
4
+
5
+ import sys
6
+ import os
7
+ import time
8
+
9
+ import numpy as np
10
+ import matplotlib.pyplot as plt
11
+ import cv2
12
+ from io import BytesIO
13
+ import base64
14
+ from PIL import Image
15
+
16
+ from openai import OpenAI
17
+
18
+ def mark_object_and_encode(frame, mask, instance_id, text_query, color_mask=False, label_number=False):
19
+ # Whether to color-fill the mask
20
+ if color_mask == True:
21
+ alpha = 0.1
22
+
23
+ colored_mask = np.zeros_like(frame)
24
+ colored_mask[mask == 1] = [255, 0, 0]
25
+ frame[mask == 1] = (
26
+ (1 - alpha) * frame[mask == 1] +
27
+ alpha * colored_mask[mask == 1]
28
+ )
29
+
30
+ # Draw the mask outline
31
+ contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
32
+ cv2.drawContours(frame, contours, -1, [255, 0, 0], 2)
33
+
34
+ # Whether to write the instance_id label
35
+ if label_number == True:
36
+ if len(contours) > 0:
37
+ largest_contour = max(contours, key=cv2.contourArea)
38
+ M = cv2.moments(largest_contour)
39
+ if M["m00"] != 0:
40
+ center_x = int(M["m10"] / M["m00"])
41
+ center_y = int(M["m01"] / M["m00"])
42
+ else:
43
+ center_x, center_y = 0, 0
44
+
45
+ font = cv2.FONT_HERSHEY_SIMPLEX
46
+ text = str(instance_id)
47
+ font_scale = 0.6
48
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
49
+ text_x = center_x - text_size[0] // 1 # horizontal center of the text
50
+ text_y = center_y
51
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
52
+
53
+ # Compute the coordinates of the text background rectangle
54
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background rectangle
55
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
56
+ rect_end = (text_x + text_size[0] + 5, text_y)
57
+
58
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
59
+ cv2.putText(frame, text, (text_x, text_y), font, font_scale, (255, 255, 255), 2)
60
+
61
+ # plt.figure(figsize=(6, 10))
62
+ # plt.imshow(frame)
63
+ # plt.title(text_query)
64
+ # plt.tight_layout()
65
+ # plt.axis('off')
66
+ # plt.show()
67
+
68
+ buffer = BytesIO()
69
+ frame = Image.fromarray(frame)
70
+ frame.save(buffer, format='jpeg')
71
+ buffer.seek(0)
72
+ encoded_frame = base64.b64encode(buffer.read()).decode("utf-8")
73
+
74
+ return encoded_frame
75
+
76
+
77
+ if __name__ == "__main__":
78
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
79
+ args = parser.parse_args()
80
+
81
+ train_dataset = build_dataset('a2d', image_set = 'train', args = args)
82
+ text_annotations = train_dataset.text_annotations
.history/mbench_a2d/gpt_a2d_numbered_20250205122340.py ADDED
@@ -0,0 +1,196 @@
1
+ from datasets import build_dataset
2
+ import argparse
3
+ import opts
4
+
5
+ import sys
6
+ import os
7
+ import time
8
+
9
+ import numpy as np
10
+ import matplotlib.pyplot as plt
11
+ import cv2
12
+ from io import BytesIO
13
+ import base64
14
+ from PIL import Image
15
+ import json
16
+
17
+ from openai import OpenAI
18
+
19
+ def mark_object_and_encode(frame, mask, instance_id, text_query, color_mask=False, label_number=False):
20
+ # Whether to color-fill the mask
21
+ if color_mask == True:
22
+ alpha = 0.1
23
+
24
+ colored_mask = np.zeros_like(frame)
25
+ colored_mask[mask == 1] = [255, 0, 0]
26
+ frame[mask == 1] = (
27
+ (1 - alpha) * frame[mask == 1] +
28
+ alpha * colored_mask[mask == 1]
29
+ )
30
+
31
+ # Draw the mask outline
32
+ contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
33
+ cv2.drawContours(frame, contours, -1, [255, 0, 0], 2)
34
+
35
+ # Whether to write the instance_id label
36
+ if label_number == True:
37
+ if len(contours) > 0:
38
+ largest_contour = max(contours, key=cv2.contourArea)
39
+ M = cv2.moments(largest_contour)
40
+ if M["m00"] != 0:
41
+ center_x = int(M["m10"] / M["m00"])
42
+ center_y = int(M["m01"] / M["m00"])
43
+ else:
44
+ center_x, center_y = 0, 0
45
+
46
+ font = cv2.FONT_HERSHEY_SIMPLEX
47
+ text = str(instance_id)
48
+ font_scale = 0.6
49
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
50
+ text_x = center_x - text_size[0] // 1 # horizontal center of the text
51
+ text_y = center_y
52
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
53
+
54
+ # Compute the coordinates of the text background rectangle
55
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background rectangle
56
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
57
+ rect_end = (text_x + text_size[0] + 5, text_y)
58
+
59
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
60
+ cv2.putText(frame, text, (text_x, text_y), font, font_scale, (255, 255, 255), 2)
61
+
62
+ # plt.figure(figsize=(6, 10))
63
+ # plt.imshow(frame)
64
+ # plt.title(text_query)
65
+ # plt.tight_layout()
66
+ # plt.axis('off')
67
+ # plt.show()
68
+
69
+ buffer = BytesIO()
70
+ frame = Image.fromarray(frame)
71
+ frame.save(buffer, format='jpeg')
72
+ buffer.seek(0)
73
+ encoded_frame = base64.b64encode(buffer.read()).decode("utf-8")
74
+
75
+ return encoded_frame
76
+
77
+ def getCaption(frame, mask, instance_id, text_query, model='gpt-4o', color_mask=False, label_number=True):
78
+
79
+ base64_image = mark_object_and_encode(frame, mask, instance_id, text_query, color_mask, label_number)
80
+
81
+ captioner = OpenAI()
82
+
83
+ # Build the referring expression directly, without filtering
84
+ dense_caption_prompt = f"""
85
+ You are a visual assistant analyzing a single frame of a video.
86
+ In the given frame, I labeled 1 object by marking each with a bright numeric ID at the center and its boundary.
87
+ I also give you a text query describing the marked object.
88
+ I want to use your expression to create an **action-centric referring expression** dataset.
89
+ Based on the frame and text query, please describe the marked object using **clearly observable** and **specific** actions
90
+ ---
91
+ ## Guidelines:
92
+ 1. **Focus on visible, prominent actions** only (e.g., running, pushing, grasping an object).
93
+ 2. **Avoid describing minor or ambiguous actions** (e.g., "slightly moving a paw", "slightly tilting head").
94
+ 3. **Do not include subjective or speculative descriptions** (e.g., “it seems excited” or “it might be preparing to jump”).
95
+ 4. **Avoid vague expressions** like "interacting with something" or "engaging with another object." Instead, specify the action (e.g., "grabbing a stick," "pressing a button").
96
+ 5. **Use dynamic action verbs** (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
97
+ 6. If there are multiple objects, ensure the description for the marked object **differentiates** its action.
98
+ 7. Base your description on these action definitions:
99
+ - Avoid using term 'minimal' or 'slightly'.
100
+ - General body movement, body position, or pattern which is prominent. (e.g. "lifting head up", "facing towards", "showing its back")
101
+ - details such as motion and intention, facial with object manipulation
102
+ - movements with object or other entities when they are prominent and observable. expression should be specific.
103
+ (e.g., "pushing another person" (O), "engaging with someone" (X) "interacting with another person" (X))
104
+ --
105
+ ## Output Format:
106
+ - For each labeled object, output **exactly one line**. Your answer should contain details and follow the following format :
107
+ object id. action-oriented description
108
+ (e.g. 1. the person is holding ski poles and skiing on a snow mountain, with his two legs bent forward.)
109
+ ### Example
110
+ If the frame has 1 labeled bear, your output should look like:
111
+ 1. the bear reaching his right arm while leaning forward to capture the prey
112
+ ---
113
+ **Do not include** appearance details (e.g., color, size, texture) or relative positioning (e.g., “on the left/right”).
114
+ **Do not include object IDs** or reference them (e.g., "Person 1" or "object 2" is not allowed).
115
+ **Do not include markdown** in the output.
116
+ Keep in mind that you should not group the object, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
117
+ For each labeled object, output referring expressions for each object id.
118
+ """
119
+ prompt_with_text_query = f"prompt: {dense_caption_prompt}\n text query: {text_query}"
120
+
121
+ MAX_RETRIES = 2
122
+ retry_count = 0
123
+
124
+ while retry_count < MAX_RETRIES:
125
+ response = captioner.chat.completions.create(
126
+ model=model,
127
+ messages=[
128
+ {
129
+ "role": "user",
130
+ "content": [
131
+ {
132
+ "type": "text",
133
+ "text": prompt_with_text_query,
134
+ },
135
+ {
136
+ "type": "image_url",
137
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
138
+ },
139
+ ],
140
+ }
141
+ ],
142
+ )
143
+
144
+
145
+ caption = response.choices[0].message.content.strip()
146
+ caption_lower = caption.lower().lstrip()
147
+ if caption_lower.startswith("1.") and not any(
148
+ phrase in caption_lower for phrase in ["i'm sorry", "please", "can't help"]
149
+ ):
150
+ break
151
+ print(f"Retrying caption generation... ({retry_count + 1}/{MAX_RETRIES})")
152
+ retry_count += 1
153
+ time.sleep(2)
154
+
155
+ if retry_count == MAX_RETRIES:
156
+ caption = None
157
+ print("Max retries reached. Caption generation failed.")
158
+
159
+ else:
160
+ caption = None
161
+
162
+ return caption
163
+
164
+ if __name__ == "__main__":
165
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
166
+ parser.add_argument('--save_caption_path', type=str, default='mbench_a2d/numbered_captions.json')
167
+ args = parser.parse_args()
168
+
169
+ train_dataset = build_dataset('a2d', image_set = 'train', args = args)
170
+ text_annotations = train_dataset.text_annotations
171
+
172
+ all_captions = {}
173
+
174
+ os.environ['OPENAI_API_KEY'] = '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'
175
+
176
+ for idx in range(100):
177
+ imgs, target = train_dataset[idx]
178
+ frames_idx = target['frames_idx'].tolist()
179
+ text_query, vid_id, frame_id, instance_id = text_annotations[idx]
180
+
181
+ frame_id = frame_id - 1
182
+ frame_order = frames_idx.index(frame_id)
183
+
184
+ frame = imgs[frame_order, :, :, :].permute(1, 2, 0).numpy()
185
+ mask = target['masks'].numpy().astype(np.uint8).squeeze()
186
+
187
+ caption = getCaption(frame, mask, instance_id, text_query)
188
+ if vid_id not in all_captions:
189
+ all_captions[vid_id] = {frame_id : caption}
190
+ else:
191
+ all_captions[vid_id][frame_id] = caption
192
+
193
+
194
+ with open(args.save_caption_path, 'w') as file:
195
+ json.dump(all_captions, file, indent=4)
196
+
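As a sanity check on the `mark_object_and_encode` output above, the base64 string can be decoded back into a PIL image. This round-trip sketch uses a synthetic frame and does not touch the A2D data.

```python
import base64
from io import BytesIO

import numpy as np
from PIL import Image

frame = np.zeros((64, 64, 3), dtype=np.uint8)  # stand-in for a video frame
buffer = BytesIO()
Image.fromarray(frame).save(buffer, format='jpeg')
encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')  # same encoding as above

decoded = Image.open(BytesIO(base64.b64decode(encoded)))  # what the API consumer sees
assert decoded.size == (64, 64)
```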
.history/mbench_a2d/gpt_a2d_numbered_20250205152326.py ADDED
@@ -0,0 +1,200 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+
5
+ from datasets import build_dataset
6
+ import argparse
7
+ import opts
8
+ import time
9
+
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ import cv2
13
+ from io import BytesIO
14
+ import base64
15
+ from PIL import Image
16
+ import json
17
+
18
+ from openai import OpenAI
19
+
20
+ def mark_object_and_encode(frame, mask, instance_id, text_query, color_mask=False, label_number=False):
21
+ # Whether to color-fill the mask
22
+ if color_mask == True:
23
+ alpha = 0.1
24
+
25
+ colored_mask = np.zeros_like(frame)
26
+ colored_mask[mask == 1] = [255, 0, 0]
27
+ frame[mask == 1] = (
28
+ (1 - alpha) * frame[mask == 1] +
29
+ alpha * colored_mask[mask == 1]
30
+ )
31
+
32
+ # Draw the mask outline
33
+ contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
34
+ cv2.drawContours(frame, contours, -1, [255, 0, 0], 2)
35
+
36
+ # Whether to write the instance_id label
37
+ if label_number == True:
38
+ if len(contours) > 0:
39
+ largest_contour = max(contours, key=cv2.contourArea)
40
+ M = cv2.moments(largest_contour)
41
+ if M["m00"] != 0:
42
+ center_x = int(M["m10"] / M["m00"])
43
+ center_y = int(M["m01"] / M["m00"])
44
+ else:
45
+ center_x, center_y = 0, 0
46
+
47
+ font = cv2.FONT_HERSHEY_SIMPLEX
48
+ text = str(instance_id)
49
+ font_scale = 0.6
50
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
51
+ text_x = center_x - text_size[0] // 1 # horizontal center of the text
52
+ text_y = center_y
53
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
54
+
55
+ # Compute the coordinates of the text background rectangle
56
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background rectangle
57
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
58
+ rect_end = (text_x + text_size[0] + 5, text_y)
59
+
60
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
61
+ cv2.putText(frame, text, (text_x, text_y), font, font_scale, (255, 255, 255), 2)
62
+
63
+ # plt.figure(figsize=(6, 10))
64
+ # plt.imshow(frame)
65
+ # plt.title(text_query)
66
+ # plt.tight_layout()
67
+ # plt.axis('off')
68
+ # plt.show()
69
+
70
+ buffer = BytesIO()
71
+ frame = Image.fromarray(frame)
72
+ frame.save(buffer, format='jpeg')
73
+ buffer.seek(0)
74
+ encoded_frame = base64.b64encode(buffer.read()).decode("utf-8")
75
+
76
+ return encoded_frame
77
+
78
+ def getCaption(frame, mask, instance_id, text_query, model='gpt-4o', color_mask=False, label_number=True):
79
+
80
+ base64_image = mark_object_and_encode(frame, mask, instance_id, text_query, color_mask, label_number)
81
+
82
+ captioner = OpenAI()
83
+
84
+ # Build the referring expression directly, without filtering
85
+ dense_caption_prompt = f"""
86
+ You are a visual assistant analyzing a single frame of a video.
87
+ In the given frame, I labeled 1 object by marking each with a bright numeric ID at the center and its boundary.
88
+ I also give you a text query describing the marked object.
89
+ I want to use your expression to create an **action-centric referring expression** dataset.
90
+ Based on the frame and text query, please describe the marked object using **clearly observable** and **specific** actions
91
+ ---
92
+ ## Guidelines:
93
+ 1. **Focus on visible, prominent actions** only (e.g., running, pushing, grasping an object).
94
+ 2. **Avoid describing minor or ambiguous actions** (e.g., "slightly moving a paw", "slightly tilting head").
95
+ 3. **Do not include subjective or speculative descriptions** (e.g., “it seems excited” or “it might be preparing to jump”).
96
+ 4. **Avoid vague expressions** like "interacting with something" or "engaging with another object." Instead, specify the action (e.g., "grabbing a stick," "pressing a button").
97
+ 5. **Use dynamic action verbs** (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
98
+ 6. If there are multiple objects, ensure the description for the marked object **differentiates** its action.
99
+ 7. Base your description on these action definitions:
100
+ - Avoid using term 'minimal' or 'slightly'.
101
+ - General body movement, body position, or pattern which is prominent. (e.g. "lifting head up", "facing towards", "showing its back")
102
+ - details such as motion and intention, facial with object manipulation
103
+ - movements with object or other entities when they are prominent and observable. expression should be specific.
104
+ (e.g., "pushing another person" (O), "engaging with someone" (X) "interacting with another person" (X))
105
+ --
106
+ ## Output Format:
107
+ - For each labeled object, output **exactly one line**. Your answer should contain details and follow the following format :
108
+ object id. action-oriented description
109
+ (e.g. 1. the person is holding ski poles and skiing on a snow mountain, with his two legs bent forward.)
110
+ ### Example
111
+ If the frame has 1 labeled bear, your output should look like:
112
+ 1. the bear reaching his right arm while leaning forward to capture the prey
113
+ ---
114
+ **Do not include** appearance details (e.g., color, size, texture) or relative positioning (e.g., “on the left/right”).
115
+ **Do not include object IDs** or reference them (e.g., "Person 1" or "object 2" is not allowed).
116
+ **Do not include markdown** in the output.
117
+ Keep in mind that you should not group the object, e.g., 2-5. people: xxx, be sure to describe each object separately (one by one).
118
+ For each labeled object, output referring expressions for each object id.
119
+ """
120
+ prompt_with_text_query = f"prompt: {dense_caption_prompt}\n text query: {text_query}"
121
+
122
+ MAX_RETRIES = 2
123
+ retry_count = 0
124
+
125
+ while retry_count < MAX_RETRIES:
126
+ response = captioner.chat.completions.create(
127
+ model=model,
128
+ messages=[
129
+ {
130
+ "role": "user",
131
+ "content": [
132
+ {
133
+ "type": "text",
134
+ "text": prompt_with_text_query,
135
+ },
136
+ {
137
+ "type": "image_url",
138
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
139
+ },
140
+ ],
141
+ }
142
+ ],
143
+ )
144
+
145
+
146
+ caption = response.choices[0].message.content.strip()
147
+ caption_lower = caption.lower().lstrip()
148
+ if caption_lower.startswith("1.") and not any(
149
+ phrase in caption_lower for phrase in ["i'm sorry", "please", "can't help"]
150
+ ):
151
+ break
152
+ print(f"Retrying caption generation... ({retry_count + 1}/{MAX_RETRIES})")
153
+ retry_count += 1
154
+ time.sleep(2)
155
+
156
+ if retry_count == MAX_RETRIES:
157
+ caption = None
158
+ print("Max retries reached. Caption generation failed.")
159
+
160
+ else:
161
+ caption = None
162
+
163
+ return caption
164
+
165
+ if __name__ == "__main__":
166
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
167
+ parser.add_argument('--save_caption_path', type=str, default='mbench_a2d/numbered_captions.json')
168
+ args = parser.parse_args()
169
+
170
+ train_dataset = build_dataset('a2d', image_set = 'train', args = args)
171
+ text_annotations = train_dataset.text_annotations
172
+
173
+ all_captions = {}
174
+
175
+ #os.environ['OPENAI_API_KEY'] = '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'
176
+ os.environ['OPENAI_API_KEY'] = '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'
177
+
178
+ for idx in range(100):
179
+ imgs, target = train_dataset[idx]
180
+ frames_idx = target['frames_idx'].tolist()
181
+ text_query, vid_id, frame_id, instance_id = text_annotations[idx]
182
+ print(f"------------vid id: {vid_id}, frame id: {frame_id}", flush=True)
183
+
184
+ frame_id = frame_id - 1
185
+ frame_order = frames_idx.index(frame_id)
186
+
187
+ frame = imgs[frame_order, :, :, :].permute(1, 2, 0).numpy()
188
+ mask = target['masks'].numpy().astype(np.uint8).squeeze()
189
+
190
+ caption = getCaption(frame, mask, instance_id, text_query)
191
+ if vid_id not in all_captions:
192
+ all_captions[vid_id] = {frame_id : caption}
193
+ else:
194
+ all_captions[vid_id][frame_id] = caption
195
+
196
+ print("Finished!", flush=True)
197
+
198
+ with open(args.save_caption_path, 'w') as file:
199
+ json.dump(all_captions, file, indent=4)
200
+
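For reference, the acceptance check used in the retry loop above (the reply must start with "1." and must not contain a refusal phrase) can be read as a small standalone helper. The sketch below is illustrative only and not part of the repository; the function name is hypothetical.

def is_valid_caption(caption: str) -> bool:
    # Mirrors the retry-loop check: accept only replies that start with "1."
    # and do not contain common refusal phrases.
    text = caption.lower().lstrip()
    refusal_phrases = ["i'm sorry", "please", "can't help"]
    return text.startswith("1.") and not any(p in text for p in refusal_phrases)

For example, is_valid_caption("1. the bear leaning forward to capture the prey") returns True, while a refusal such as "I'm sorry, I can't help with that" is rejected and triggers another attempt.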
.history/mbench_a2d/gpt_a2d_numbered_20250207110257.py ADDED
@@ -0,0 +1,213 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+
5
+ from datasets import build_dataset
6
+ import argparse
7
+ import opts
8
+ import time
9
+
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ import cv2
13
+ from io import BytesIO
14
+ import base64
15
+ from PIL import Image
16
+ import json
17
+
18
+ from openai import OpenAI
19
+
20
+ def mark_object_and_encode(frame, mask, instance_id, text_query, color_mask=False, label_number=False):
21
+ # whether to color-fill the mask
22
+ if color_mask == True:
23
+ alpha = 0.1
24
+
25
+ colored_mask = np.zeros_like(frame)
26
+ colored_mask[mask == 1] = [255, 0, 0]
27
+ frame[mask == 1] = (
28
+ (1 - alpha) * frame[mask == 1] +
29
+ alpha * colored_mask[mask == 1]
30
+ )
31
+
32
+ # draw the mask outline
33
+ contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
34
+ cv2.drawContours(frame, contours, -1, [255, 0, 0], 2)
35
+
36
+ # whether to write the instance_id label
37
+ if label_number == True:
38
+ if len(contours) > 0:
39
+ largest_contour = max(contours, key=cv2.contourArea)
40
+ M = cv2.moments(largest_contour)
41
+ if M["m00"] != 0:
42
+ center_x = int(M["m10"] / M["m00"])
43
+ center_y = int(M["m01"] / M["m00"])
44
+ else:
45
+ center_x, center_y = 0, 0
46
+
47
+ font = cv2.FONT_HERSHEY_SIMPLEX
48
+ text = str(instance_id)
49
+ font_scale = 0.6
50
+ text_size = cv2.getTextSize(text, font, font_scale, 2)[0]
51
+ text_x = center_x - text_size[0] // 1 # horizontal position of the text (offset left of the contour center by the text width)
52
+ text_y = center_y
53
+ # text_y = center_y + text_size[1] // 2 # vertical center of the text
54
+
55
+ # compute coordinates for the text background rectangle
56
+ rect_start = (text_x - 5, text_y - text_size[1] - 5) # top-left corner of the background rectangle
57
+ # rect_end = (text_x + text_size[0] + 5, text_y + 5)
58
+ rect_end = (text_x + text_size[0] + 5, text_y)
59
+
60
+ cv2.rectangle(frame, rect_start, rect_end, (0, 0, 0), -1)
61
+ cv2.putText(frame, text, (text_x, text_y), font, font_scale, (255, 255, 255), 2)
62
+
63
+ # plt.figure(figsize=(6, 10))
64
+ # plt.imshow(frame)
65
+ # plt.title(text_query)
66
+ # plt.tight_layout()
67
+ # plt.axis('off')
68
+ # plt.show()
69
+
70
+ buffer = BytesIO()
71
+ frame = Image.fromarray(frame)
72
+ frame.save(buffer, format='jpeg')
73
+ buffer.seek(0)
74
+ encoded_frame = base64.b64encode(buffer.read()).decode("utf-8")
75
+
76
+ return encoded_frame
77
+
78
+ def getCaption(frame, mask, instance_id, text_query, model='gpt-4o', color_mask=False, label_number=True):
79
+
80
+ base64_image = mark_object_and_encode(frame, mask, instance_id, text_query, color_mask, label_number)
81
+
82
+ captioner = OpenAI()
83
+
84
+ # create referring expressions directly, without filtering
85
+ dense_caption_prompt = f"""
86
+ You are a visual assistant analyzing a single frame of a video.
87
+ In the given frame, I labeled 1 object by marking it with a bright numeric ID at its center and along its boundary.
88
+ I also give you a text query describing the marked object.
89
+ I want to use your expression to create an **action-centric referring expression** dataset.
90
+ Based on the frame and text query, please describe the marked object using **clearly observable** and **specific** actions.
91
+ ---
92
+ ## Guidelines:
93
+ 1. **Focus on visible, prominent actions** only (e.g., running, pushing, grasping an object).
94
+ 2. **Avoid describing minor or ambiguous actions** (e.g., "slightly moving a paw", "slightly tilting head").
95
+ 3. **Do not include subjective or speculative descriptions** (e.g., “it seems excited” or “it might be preparing to jump”).
96
+ 4. **Avoid vague expressions** like "interacting with something" or "engaging with another object." Instead, specify the action (e.g., "grabbing a stick," "pressing a button").
97
+ 5. **Use dynamic action verbs** (holding, throwing, inspecting, leaning, pressing) to highlight body movement or object/animal interaction.
98
+ 6. If there are multiple objects, ensure the description for the marked object **differentiates** its action.
99
+ 7. Base your description on these action definitions:
100
+ - Avoid using the terms 'minimal' or 'slightly'.
101
+ - General body movement, body position, or pattern which is prominent. (e.g. "lifting head up", "facing towards", "showing its back")
102
+ - details such as motion and intention, or facial expression combined with object manipulation
103
+ - movements with objects or other entities when they are prominent and observable; the expression should be specific.
104
+ (e.g., "pushing another person" (O), "engaging with someone" (X), "interacting with another person" (X))
105
+ ---
106
+ ## Output Format:
107
+ - For each labeled object, output **exactly one line**. Your answer should contain details and follow this format:
108
+ object id. action-oriented description
109
+ (e.g. 1. the person is holding ski poles and skiing on a snow mountain, with his two legs bent forward.)
110
+ ### Example
111
+ If the frame has 1 labeled bear, your output should look like:
112
+ 1. the bear reaching out its right arm while leaning forward to capture the prey
113
+ ---
114
+ **Do not include** appearance details (e.g., color, size, texture) or relative positioning (e.g., “on the left/right”).
115
+ **Do not include object IDs** or reference them (e.g., "Person 1" or "object 2" is not allowed).
116
+ **Do not include markdown** in the output.
117
+ Keep in mind that you should not group objects (e.g., "2-5. people: xxx"); describe each object separately (one by one).
118
+ For each labeled object, output referring expressions for each object id.
119
+ """
120
+ prompt_with_text_query = f"prompt: {dense_caption_prompt}\n text query: {text_query}"
121
+
122
+ MAX_RETRIES = 2
123
+ retry_count = 0
124
+
125
+ while retry_count < MAX_RETRIES:
126
+ response = captioner.chat.completions.create(
127
+ model=model,
128
+ messages=[
129
+ {
130
+ "role": "user",
131
+ "content": [
132
+ {
133
+ "type": "text",
134
+ "text": prompt_with_text_query,
135
+ },
136
+ {
137
+ "type": "image_url",
138
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
139
+ },
140
+ ],
141
+ }
142
+ ],
143
+ )
144
+
145
+
146
+ caption = response.choices[0].message.content.strip()
147
+ caption_lower = caption.lower().lstrip()
148
+ if caption_lower.startswith("1.") and not any(
149
+ phrase in caption_lower for phrase in ["i'm sorry", "please", "can't help"]
150
+ ):
151
+ break
152
+ print(f"Retrying caption generation... ({retry_count + 1}/{MAX_RETRIES})")
153
+ retry_count += 1
154
+ time.sleep(2)
155
+
156
+ if retry_count == MAX_RETRIES:
157
+ caption = None
158
+ print("Max retries reached. Caption generation failed.")
159
+
160
+ else:
161
+ caption = None
162
+
163
+ return caption
164
+
165
+ if __name__ == "__main__":
166
+ parser = argparse.ArgumentParser('ReferFormer training and evaluation script', parents=[opts.get_args_parser()])
167
+ parser.add_argument('--save_caption_path', type=str, default='mbench_a2d/numbered_captions.json')
168
+ args = parser.parse_args()
169
+
170
+ train_dataset = build_dataset('a2d', image_set = 'train', args = args)
171
+ text_annotations = train_dataset.text_annotations
172
+
173
+ all_captions = {}
174
+
175
+ # NOTE: hard-coded OpenAI API keys were committed here originally; they have been redacted.
176
+ # Set OPENAI_API_KEY in the environment before running this script (the OpenAI client reads it automatically).
177
+
178
+ first_text_query = ""
179
+ for idx in range(300):
180
+ imgs, target = train_dataset[idx]
181
+ frames_idx = target['frames_idx'].tolist()
182
+ text_query, vid_id, frame_id, instance_id = text_annotations[idx]
183
+
184
+ if text_query == first_text_query:
185
+ continue
186
+
187
+ print(f"------------vid id: {vid_id}, frame id: {frame_id}, instance id: {instance_id}", flush=True)
188
+
189
+ frame_id = frame_id - 1
190
+ frame_order = frames_idx.index(frame_id)
191
+
192
+ frame = imgs[frame_order, :, :, :].permute(1, 2, 0).numpy()
193
+ mask = target['masks'].numpy().astype(np.uint8).squeeze()
194
+
195
+ caption = getCaption(frame, mask, instance_id, text_query, model='gpt-4o-mini')
196
+
197
+ if vid_id in all_captions:
198
+ if frame_id in all_captions[vid_id]:
199
+ all_captions[vid_id][frame_id][instance_id] = caption
200
+ else:
201
+ all_captions[vid_id][frame_id] = {instance_id : caption}
202
+ else:
203
+ all_captions[vid_id] = {frame_id : {instance_id: caption}}
204
+
205
+ if idx % 50 == 0:
206
+ with open(args.save_caption_path, 'w') as file:
207
+ json.dump(all_captions, file, indent=4)
208
+
209
+ print("Finished!", flush=True)
210
+
211
+ with open(args.save_caption_path, 'w') as file:
212
+ json.dump(all_captions, file, indent=4)
213
+
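As a rough usage sketch (not part of the repository), the captions file written by this later version nests entries as {video_id: {frame_id: {instance_id: caption}}}; after the JSON round-trip the frame and instance ids come back as string keys. Assuming the default --save_caption_path, it could be inspected like this:

import json

# Illustrative only: read back the nested caption dictionary written by the script above.
with open('mbench_a2d/numbered_captions.json') as f:
    all_captions = json.load(f)

for vid_id, frames in all_captions.items():
    for frame_id, instances in frames.items():        # keys are strings after JSON serialization
        for instance_id, caption in instances.items():
            print(vid_id, frame_id, instance_id, caption)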
.history/slurm_script/jupyter_20250121151552.sh ADDED
@@ -0,0 +1,16 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=jupyter
4
+ #SBATCH --partition=a5000
5
+ #SBATCH --nodelist=node04
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/jupyter.out
11
+
12
+ ml purge
13
+ ml load cuda/12.1
14
+ eval "$(conda shell.bash hook)"
15
+ conda activate referformer
16
+ srun jupyter notebook --no-browser --port=7890
.history/slurm_script/jupyter_20250121151643.sh ADDED
@@ -0,0 +1,16 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=jupyter
4
+ #SBATCH --partition=a4000
5
+ #SBATCH --nodelist=node05
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/jupyter.out
11
+
12
+ ml purge
13
+ ml load cuda/12.1
14
+ eval "$(conda shell.bash hook)"
15
+ conda activate referformer
16
+ srun jupyter notebook --no-browser --port=7890
.history/slurm_script/mbench_gpt_a2d_20250205122515.sh ADDED
@@ -0,0 +1,19 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_a2d
4
+ #SBATCH --partition=a4000
5
+ #SBATCH --nodelist=node05
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_a2d.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_numbered_cy.py \
19
+ --save_caption_path mbench_a2d/numbered_captions.json
.history/slurm_script/mbench_gpt_ref-ytvos-revised_20250121155940.sh ADDED
@@ -0,0 +1,18 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_revised
4
+ #SBATCH --partition=a5000
5
+ #SBATCH --nodelist=node04
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_revised.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_revised.py
.history/slurm_script/mbench_gpt_ref-ytvos-revised_20250121160841.sh ADDED
@@ -0,0 +1,18 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_revised50
4
+ #SBATCH --partition=a5000
5
+ #SBATCH --nodelist=node04
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_revised50.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_revised.py
.history/slurm_script/mbench_gpt_ref-ytvos-revised_20250124085144.sh ADDED
@@ -0,0 +1,18 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_revised50
4
+ #SBATCH --partition=a5000
5
+ #SBATCH --nodelist=node04
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_revised50.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos-revised.py
.history/slurm_script/mbench_gpt_ref-ytvos_20250119070944.sh ADDED
@@ -0,0 +1,18 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos
4
+ #SBATCH --partition=a4000
5
+ #SBATCH --nodelist=node05
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos.py
.history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250130190228.sh ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_numbered
4
+ #SBATCH --partition=a4000
5
+ #SBATCH --nodelist=node05
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_numbered.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_numbered_cy.py \
19
+ --save_caption_path mbench/numbered_captions.json \
20
+ --save_valid_obj_ids_path mbench/numbered_valid_obj_ids.json
.history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250201140706.sh ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_numbered
4
+ #SBATCH --partition=a4000
5
+ #SBATCH --nodelist=node05
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_numbered.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_numbered_cy.py \
19
+ --save_caption_path mbench/numbered_captions_gpt-4o.json \
20
+ --save_valid_obj_ids_path mbench/numbered_valid_obj_ids_gpt-4o.json
.history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250202183206.sh ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_numbered
4
+ #SBATCH --partition=a4000
5
+ #SBATCH --nodelist=node05
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_numbered.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_numbered_cy.py \
19
+ --save_caption_path mbench/numbered_captions_gpt-4o_no_mask_color.json \
20
+ --save_valid_obj_ids_path mbench/numbered_valid_obj_ids_gpt-4o_no_mask_color.json
.history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250207171604.sh ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_numbered_final
4
+ #SBATCH --partition=a4000
5
+ #SBATCH --nodelist=node05
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_numbered_final.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_numbered_cy_sanity_2.py \
19
+ --save_caption_path mbench/numbered_captions_gpt-4o_final.json \
20
+ --save_valid_obj_ids_path mbench/numbered_valid_obj_ids_gpt-4o_final.json
.history/slurm_script/mbench_gtp_ref-ytvos_numbered_20250207172920.sh ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=mbench_gpt_ref-ytvos_numbered_final
4
+ #SBATCH --partition=a5000
5
+ #SBATCH --nodelist=node04
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --time=14-00:00:00
8
+ #SBATCH --mem=5G
9
+ #SBATCH --cpus-per-task=4
10
+ #SBATCH --output=/home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer/slurm_log/mbench_gpt_ref-ytvos_numbered_final.out
11
+ cd /home/yejin/data/projects/yejin/VerbCentric_RIS/ReferFormer
12
+
13
+ ml purge
14
+ ml load cuda/12.1
15
+ eval "$(conda shell.bash hook)"
16
+ conda activate referformer
17
+
18
+ python3 mbench/gpt_ref-ytvos_numbered_cy_sanity_2.py \
19
+ --save_caption_path mbench/numbered_captions_gpt-4o_final.json \
20
+ --save_valid_obj_ids_path mbench/numbered_valid_obj_ids_gpt-4o_final.json
hf_cache/.locks/models--zhiqiulin--clip-flant5-xxl/ca26d90c9e8e071d0bc31b570aef68306d0be1db4330471d10a117061a15a991.lock ADDED
File without changes
hf_cache/models--zhiqiulin--clip-flant5-xxl/.no_exist/89bad6fffe1126b24d4360c1e1f69145eb6103aa/pytorch_model.bin ADDED
File without changes
hf_cache/models--zhiqiulin--clip-flant5-xxl/blobs/12acb5074c883dcab3e166d86d20130615ff83b0d26736ee046f4184202ebd3b ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12acb5074c883dcab3e166d86d20130615ff83b0d26736ee046f4184202ebd3b
3
+ size 9999791010