Wendy committed on
Commit bb2355c · verified · 1 parent: 5724c3d

Upload 2 files

.gitattributes CHANGED
@@ -41,3 +41,4 @@ general_blip_train_llava_70ORI_30COCO_swift.json filter=lfs diff=lfs merge=lfs -text
  general_blip_train_llava_70ORI_30COCO_swift_A100.json filter=lfs diff=lfs merge=lfs -text
  general_blip_train_llava_coco_swift_A100.json filter=lfs diff=lfs merge=lfs -text
  general_blip_train_llava_swift_A100.json filter=lfs diff=lfs merge=lfs -text
+ general_blip_train_llava_imgh_swift_multi_A100.json filter=lfs diff=lfs merge=lfs -text
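For context: the added attribute line routes the new 95 MB JSON file in this commit through Git LFS, so the repository itself stores only a small pointer. Such a line is typically generated by running git lfs track "general_blip_train_llava_imgh_swift_multi_A100.json" and committing the updated .gitattributes together with the tracked file.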
aitw_to_swift.ipynb ADDED
@@ -0,0 +1,382 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os \n",
+ "import json\n",
+ "\n",
+ "def read_json(file_path): \n",
+ " with open(file_path, 'r', encoding='utf-8') as file:\n",
+ " data = json.load(file)\n",
+ " return data\n",
+ "\n",
+ "def write_json(file_path, data):\n",
+ " with open(file_path, 'w', encoding='utf-8') as file:\n",
+ " json.dump(data, file, ensure_ascii=False, indent=4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Change paths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# data = read_json('/code/Data/m4_instruct_annotations.json')\n",
+ "# data = read_json('/code/Data/general_blip_train_llava_imgh.json')\n",
+ "data = read_json('/code/Data/general_blip_train_llava_swift.json')\n",
+ "# data = read_json('/code/Data/general_blip_test_llava_swift.json')\n",
+ "\n",
+ "# data = read_json('/code/LLaVA/data/json/general_blip_train_llava.json')\n",
+ "# data = read_json('/code/LLaVA/data/json/all_blip_train_llava_coco.json')\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'conversations': [{'from': 'human',\n",
+ " 'value': '<image>\\n'\n",
+ " 'Previous Actions: Goal: Open a new Chrome '\n",
+ " 'private window'},\n",
+ " {'from': 'gpt',\n",
+ " 'value': 'Action Plan: '\n",
+ " '[DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,STATUS_TASK_COMPLETE]\\n'\n",
+ " '; Action Decision: \"action_type\": \"DUAL_POINT\", '\n",
+ " '\"touch_point\": \"[0.7761, 0.7089]\", \"lift_point\": '\n",
+ " '\"[0.7761, 0.7089]\", \"typed_text\": \"\"'}],\n",
+ " 'id': 'general_blip_0',\n",
+ " 'image': 'blip/general_texts_splits/10_1.png'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pprint\n",
+ "\n",
+ "pprint.pprint(data[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'images': ['/code/Auto-GUI/dataset/blip/general_texts_splits/10_1.png'],\n",
+ " 'query': '<<image>\\n'\n",
+ " 'Previous Actions: Goal: Open a new Chrome private window>55555',\n",
+ " 'response': 'Action Plan: '\n",
+ " '[DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,DUAL_POINT,STATUS_TASK_COMPLETE]\\n'\n",
+ " '; Action Decision: \"action_type\": \"DUAL_POINT\", \"touch_point\": '\n",
+ " '\"[0.7761, 0.7089]\", \"lift_point\": \"[0.7761, 0.7089]\", '\n",
+ " '\"typed_text\": \"\"'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pprint\n",
+ "\n",
+ "pprint.pprint(data[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "8831"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for index, i in enumerate(data):\n",
+ "\n",
+ " # ################## A6000 ##################\n",
+ " # data[index]['images'][0] = '/data/home/zbz5349/WorkSpace/LLaVA/data/blip' + data[index]['images'][0][27:]\n",
+ "\n",
+ " ################## H100 ##################\n",
+ " data[index]['images'][0] = '/gpu02home/zbz5349/ICLR_2024/LLaVA_Mobile_V1/data/blip' + data[index]['images'][0][27:]\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# write_json('/code/Data/general_blip_train_llava_swift_a6000.json', data)\n",
+ "# write_json('/code/Data/general_blip_train_llava_swift_H100.json', data)\n",
+ "write_json('/code/Data/general_blip_test_llava_swift_H100.json', data)\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "-----"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Restructure format / Single Image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "path = '/code/LLaVA/data/json/general_blip_train_llava.json'\n",
+ "# path = '/code/LLaVA/data/json/general_blip_train_llava_coco.json'\n",
+ "# path = '/code/LLaVA/data/json/general_blip_train_llava_70ORI_30COCO.json'\n",
+ "\n",
+ "# path = '/code/LLaVA/data/json/all_blip_train_llava.json'\n",
+ "# path = '/code/LLaVA/data/json/all_blip_train_llava_coco.json'\n",
+ "\n",
+ "data = read_json(path)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "new_data = []\n",
+ "for index, i in enumerate(data):\n",
+ "\n",
+ " temp = {}\n",
+ " temp['query'] = i['conversations'][0]['value']\n",
+ " temp['response'] = i['conversations'][1]['value']\n",
+ " temp['images'] = []\n",
+ "\n",
+ " ############## H100\n",
+ " # temp_image_path = '/gpu02home/zbz5349/ICLR_2024/LLaVA_Mobile_V1/data/' + i['image'] \n",
+ "\n",
+ " ############## A100\n",
+ " temp_image_path = '/data/zbz5349/ICLR_2024/data/' + i['image']\n",
+ " \n",
+ " temp['images'].append(temp_image_path)\n",
+ "\n",
+ " new_data.append(temp)\n",
+ " # pprint.pprint(temp)\n",
+ " # break"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "new_path = path.split('.')[0] + '_swift_A100.json'\n",
+ "write_json(new_path, new_data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'/code/LLaVA/data/json/all_blip_train_llava_swift.json'"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "new_path"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Restructure format / Multi Image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "path = '/code/Data/general_blip_train_llava_imgh.json'\n",
+ " \n",
+ "data = read_json(path)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pprint\n",
+ "\n",
+ "\n",
+ "def replace_hashes(text):\n",
+ " # Use rsplit to replace the last four '###' markers with '<image>'\n",
+ " parts = text.rsplit('###', 4)\n",
+ " # Rejoin the split parts with the placeholder\n",
+ " return '<image>'.join(parts)\n",
+ "\n",
+ "def ensure_five_xxx(text):\n",
+ " # Count how many '<image>' placeholders the text already contains\n",
+ " count = text.count('<image>')\n",
+ " if count < 5:\n",
+ " # Work out how many more '<image>' placeholders are needed\n",
+ " missing_xxx = 5 - count\n",
+ " # Append enough '<image>' placeholders to the end of the text\n",
+ " text += ' <image>' * missing_xxx\n",
+ " \n",
+ " return text\n",
+ "\n",
+ "\n",
+ "new_data = []\n",
+ "for index, i in enumerate(data):\n",
+ "\n",
+ " temp = {}\n",
+ " temp['query'] = i['conversations'][0]['value']\n",
+ " temp['response'] = i['conversations'][1]['value']\n",
+ " \n",
+ " ############## A100 ##############\n",
+ " temp_image_path_list = []\n",
+ " for w in i['image_history']:\n",
+ " temp_image_path = '/data/zbz5349/ICLR_2024/data/' + w\n",
+ " temp_image_path_list.append(temp_image_path)\n",
+ "\n",
+ " temp['images'] = temp_image_path_list \n",
+ " \n",
+ " new_temp = temp['query'].split('\"action_type\"')\n",
+ " new_temp = ' ###\\n \"action_type\"'.join(new_temp)\n",
+ " # pprint.pprint(new_temp) \n",
+ " new_temp = replace_hashes(new_temp)\n",
+ " new_temp = ensure_five_xxx(new_temp)\n",
+ " # pprint.pprint(new_temp) \n",
+ " # break\n",
+ "\n",
+ " temp['query'] = new_temp\n",
+ " new_data.append(temp)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'images': ['/data/zbz5349/ICLR_2024/data/blip/general_texts_splits/13_3.png',\n",
+ " '/data/zbz5349/ICLR_2024/data/blip/general_texts_splits/13_2.png',\n",
+ " '/data/zbz5349/ICLR_2024/data/blip/general_texts_splits/13_1.png',\n",
+ " '/data/zbz5349/ICLR_2024/data/blip/general_texts_splits/13_1.png',\n",
+ " '/data/zbz5349/ICLR_2024/data/blip/general_texts_splits/13_1.png'],\n",
+ " 'query': '<image>\\n'\n",
+ " 'Previous Actions: <image>\\n'\n",
+ " ' \"action_type\": \"PRESS_HOME\", \"touch_point\": \"[-1.0, -1.0]\", '\n",
+ " '\"lift_point\": \"[-1.0, -1.0]\", \"typed_text\": \"\" <image>\\n'\n",
+ " ' \"action_type\": \"DUAL_POINT\", \"touch_point\": \"[0.7649, 0.6773]\", '\n",
+ " '\"lift_point\": \"[0.7649, 0.6773]\", \"typed_text\": \"\" Goal: Open a new '\n",
+ " 'Chrome window <image> <image>',\n",
+ " 'response': 'Action Plan: [STATUS_TASK_COMPLETE]\\n'\n",
+ " '; Action Decision: \"action_type\": \"STATUS_TASK_COMPLETE\", '\n",
+ " '\"touch_point\": \"[-1.0, -1.0]\", \"lift_point\": \"[-1.0, -1.0]\", '\n",
+ " '\"typed_text\": \"\"'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "# data[0]\n",
+ "\n",
+ "pprint.pprint(new_data[11]) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "new_path = path.split('.')[0] + '_swift_multi_A100.json'\n",
+ "write_json(new_path, new_data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "llava",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
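The least obvious step in the notebook above is the multi-image query rewrite: previous-action separators ('###') become '<image>' placeholders, and the query is then padded so it carries exactly five placeholders, matching the five-entry 'images' list built from i['image_history']. A minimal standalone sketch of that logic, with an invented toy query shaped like the notebook's data:

def replace_hashes(text):
    # rsplit with maxsplit=4 cuts at the last four '###' markers,
    # so joining with '<image>' replaces at most those four
    return '<image>'.join(text.rsplit('###', 4))

def ensure_five_xxx(text):
    # Pad with trailing placeholders until the query holds five '<image>' tokens
    missing = 5 - text.count('<image>')
    return text + ' <image>' * max(missing, 0)

# Hypothetical query with two previous actions (real ones come from
# i['conversations'][0]['value'] after the '"action_type"' join step)
query = ('<image>\nPrevious Actions: ###\n "action_type": "PRESS_HOME" '
         '###\n "action_type": "DUAL_POINT" Goal: Open a new Chrome window')
query = ensure_five_xxx(replace_hashes(query))
assert query.count('<image>') == 5  # 1 original + 2 replaced + 2 padded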
general_blip_train_llava_imgh_swift_multi_A100.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbe3be1b9f4b1e9c2774835fab06ea53f16b1e90415d3c0ca019260646e89ffd
+ size 95345344
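The three lines above are a Git LFS pointer rather than the dataset itself: they record the pointer spec version, the SHA-256 of the actual payload, and its size in bytes (about 95 MB). A minimal sketch of reading those fields back in Python, assuming a local checkout where the pointer has not yet been smudged into the full file:

def parse_lfs_pointer(path):
    # Each pointer line is 'key value'; split on the first space only
    fields = {}
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    return fields

info = parse_lfs_pointer('general_blip_train_llava_imgh_swift_multi_A100.json')
print(info['oid'], int(info['size']))  # sha256:cbe3be1b... 95345344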