Wendy
committed on
Upload bulit_IFD_logits.ipynb with huggingface_hub
Browse files- bulit_IFD_logits.ipynb +270 -0
bulit_IFD_logits.ipynb
ADDED
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"import json\n",
|
10 |
+
"\n",
|
11 |
+
"def read_json(file_path): \n",
|
12 |
+
" with open(file_path, 'r', encoding='utf-8') as file:\n",
|
13 |
+
" data = json.load(file)\n",
|
14 |
+
" return data\n",
|
15 |
+
"\n",
|
16 |
+
"def write_json(file_path, data):\n",
|
17 |
+
" with open(file_path, 'w', encoding='utf-8') as file:\n",
|
18 |
+
" json.dump(data, file, ensure_ascii=False, indent=4)"
|
19 |
+
]
|
20 |
+
},
|
21 |
+
{
|
22 |
+
"cell_type": "code",
|
23 |
+
"execution_count": 2,
|
24 |
+
"metadata": {},
|
25 |
+
"outputs": [],
|
26 |
+
"source": [
|
27 |
+
"data = read_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_.json')"
|
28 |
+
]
|
29 |
+
},
|
30 |
+
{
|
31 |
+
"cell_type": "code",
|
32 |
+
"execution_count": 6,
|
33 |
+
"metadata": {},
|
34 |
+
"outputs": [],
|
35 |
+
"source": [
|
36 |
+
"for i in data:\n",
|
37 |
+
" i['conversations'][0]['value'] = i['conversations'][0]['value'].replace('<image>\\n','')\n",
|
38 |
+
" # break"
|
39 |
+
]
|
40 |
+
},
|
41 |
+
{
|
42 |
+
"cell_type": "code",
|
43 |
+
"execution_count": 5,
|
44 |
+
"metadata": {},
|
45 |
+
"outputs": [
|
46 |
+
{
|
47 |
+
"data": {
|
48 |
+
"text/plain": [
|
49 |
+
"{'id': 0,\n",
|
50 |
+
" 'image': 'llava_image_tune/coco/train2017/000000033471.jpg',\n",
|
51 |
+
" 'conversations': [{'from': 'human',\n",
|
52 |
+
" 'value': 'What are the colors of the bus in the image?'},\n",
|
53 |
+
" {'from': 'gpt', 'value': 'The bus in the image is white and red.'},\n",
|
54 |
+
" {'from': 'human',\n",
|
55 |
+
" 'value': 'What feature can be seen on the back of the bus?'},\n",
|
56 |
+
" {'from': 'gpt', 'value': 'The back of the bus features an advertisement.'},\n",
|
57 |
+
" {'from': 'human',\n",
|
58 |
+
" 'value': 'Is the bus driving down the street or pulled off to the side?'},\n",
|
59 |
+
" {'from': 'gpt',\n",
|
60 |
+
" 'value': 'The bus is driving down the street, which is crowded with people and other vehicles.'}]}"
|
61 |
+
]
|
62 |
+
},
|
63 |
+
"execution_count": 5,
|
64 |
+
"metadata": {},
|
65 |
+
"output_type": "execute_result"
|
66 |
+
}
|
67 |
+
],
|
68 |
+
"source": [
|
69 |
+
"data[0]"
|
70 |
+
]
|
71 |
+
},
|
72 |
+
{
|
73 |
+
"cell_type": "markdown",
|
74 |
+
"metadata": {},
|
75 |
+
"source": [
|
76 |
+
"这里可以解释 loss 大小 和loss ifd 对于数据的影响\n",
|
77 |
+
"\n",
|
78 |
+
"high loss 意味着 整体对话风格 都比较难学,而不是事实因素\n",
|
79 |
+
"\n",
|
80 |
+
"high ifd 意味着 对话并不依赖图片\n",
|
81 |
+
"\n",
|
82 |
+
"不同意义上的 高质量"
|
83 |
+
]
|
84 |
+
},
|
85 |
+
{
|
86 |
+
"cell_type": "code",
|
87 |
+
"execution_count": 7,
|
88 |
+
"metadata": {},
|
89 |
+
"outputs": [],
|
90 |
+
"source": [
|
91 |
+
"# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_NImg.json',data)"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"cell_type": "code",
|
96 |
+
"execution_count": null,
|
97 |
+
"metadata": {},
|
98 |
+
"outputs": [],
|
99 |
+
"source": []
|
100 |
+
},
|
101 |
+
{
|
102 |
+
"cell_type": "markdown",
|
103 |
+
"metadata": {},
|
104 |
+
"source": [
|
105 |
+
"------"
|
106 |
+
]
|
107 |
+
},
|
108 |
+
{
|
109 |
+
"cell_type": "code",
|
110 |
+
"execution_count": 2,
|
111 |
+
"metadata": {},
|
112 |
+
"outputs": [],
|
113 |
+
"source": [
|
114 |
+
"data = read_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_.json')"
|
115 |
+
]
|
116 |
+
},
|
117 |
+
{
|
118 |
+
"cell_type": "code",
|
119 |
+
"execution_count": 3,
|
120 |
+
"metadata": {},
|
121 |
+
"outputs": [],
|
122 |
+
"source": [
|
123 |
+
"import copy \n",
|
124 |
+
"\n",
|
125 |
+
"template = ''' \n",
|
126 |
+
"Does the previous paragraph demarcated within ### and ###\n",
|
127 |
+
"contain informative signal for visual instruction tuning a vision-language model?\n",
|
128 |
+
"An informative datapoint should be well-formatted, contain some\n",
|
129 |
+
"usable knowledge of the world, and strictly NOT have any harmful,\n",
|
130 |
+
"racist, sexist, etc. content.\n",
|
131 |
+
"OPTIONS:\n",
|
132 |
+
"- yes\n",
|
133 |
+
"- no\n",
|
134 |
+
"'''\n",
|
135 |
+
" \n",
|
136 |
+
"con_template = [{'from': 'human',\n",
|
137 |
+
" 'value': '<image>\\nWhat are the colors of the bus in the image?'},\n",
|
138 |
+
" {'from': 'gpt', 'value': 'The bus in the image is white and red.'}]\n",
|
139 |
+
"\n",
|
140 |
+
"\n",
|
141 |
+
"for i in data:\n",
|
142 |
+
" i['ori_conversations'] = copy.deepcopy(i['conversations'])\n",
|
143 |
+
" \n",
|
144 |
+
" sentence = ''\n",
|
145 |
+
" for j in i['conversations']:\n",
|
146 |
+
" sentence = sentence + j['value'] + ' '\n",
|
147 |
+
" \n",
|
148 |
+
" final_sent = \"############\\n\" + sentence + \"############\\n\" + template\n",
|
149 |
+
" final_sent = final_sent #.replace('<image>','')\n",
|
150 |
+
" final_result = \"response: yes\"\n",
|
151 |
+
" \n",
|
152 |
+
" new_con_template = copy.deepcopy(con_template)\n",
|
153 |
+
" new_con_template[0]['value'] = final_sent\n",
|
154 |
+
" new_con_template[1]['value'] = final_result\n",
|
155 |
+
" i['conversations'] = new_con_template\n",
|
156 |
+
" \n",
|
157 |
+
" # del i['image']\n",
|
158 |
+
" # i['Old_Path'] = i.pop('image')\n",
|
159 |
+
" # break"
|
160 |
+
]
|
161 |
+
},
|
162 |
+
{
|
163 |
+
"cell_type": "code",
|
164 |
+
"execution_count": 4,
|
165 |
+
"metadata": {},
|
166 |
+
"outputs": [
|
167 |
+
{
|
168 |
+
"data": {
|
169 |
+
"text/plain": [
|
170 |
+
"{'id': 0,\n",
|
171 |
+
" 'image': 'llava_image_tune/coco/train2017/000000033471.jpg',\n",
|
172 |
+
" 'conversations': [{'from': 'human',\n",
|
173 |
+
" 'value': '############\\n<image>\\nWhat are the colors of the bus in the image? The bus in the image is white and red. What feature can be seen on the back of the bus? The back of the bus features an advertisement. Is the bus driving down the street or pulled off to the side? The bus is driving down the street, which is crowded with people and other vehicles. ############\\n \\nDoes the previous paragraph demarcated within ### and ###\\ncontain informative signal for visual instruction tuning a vision-language model?\\nAn informative datapoint should be well-formatted, contain some\\nusable knowledge of the world, and strictly NOT have any harmful,\\nracist, sexist, etc. content.\\nOPTIONS:\\n- yes\\n- no\\n'},\n",
|
174 |
+
" {'from': 'gpt', 'value': 'response: yes'}],\n",
|
175 |
+
" 'ori_conversations': [{'from': 'human',\n",
|
176 |
+
" 'value': '<image>\\nWhat are the colors of the bus in the image?'},\n",
|
177 |
+
" {'from': 'gpt', 'value': 'The bus in the image is white and red.'},\n",
|
178 |
+
" {'from': 'human',\n",
|
179 |
+
" 'value': 'What feature can be seen on the back of the bus?'},\n",
|
180 |
+
" {'from': 'gpt', 'value': 'The back of the bus features an advertisement.'},\n",
|
181 |
+
" {'from': 'human',\n",
|
182 |
+
" 'value': 'Is the bus driving down the street or pulled off to the side?'},\n",
|
183 |
+
" {'from': 'gpt',\n",
|
184 |
+
" 'value': 'The bus is driving down the street, which is crowded with people and other vehicles.'}]}"
|
185 |
+
]
|
186 |
+
},
|
187 |
+
"execution_count": 4,
|
188 |
+
"metadata": {},
|
189 |
+
"output_type": "execute_result"
|
190 |
+
}
|
191 |
+
],
|
192 |
+
"source": [
|
193 |
+
"data[0]"
|
194 |
+
]
|
195 |
+
},
|
196 |
+
{
|
197 |
+
"cell_type": "code",
|
198 |
+
"execution_count": 5,
|
199 |
+
"metadata": {},
|
200 |
+
"outputs": [],
|
201 |
+
"source": [
|
202 |
+
"# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_logits_Img.json',data)"
|
203 |
+
]
|
204 |
+
},
|
205 |
+
{
|
206 |
+
"cell_type": "code",
|
207 |
+
"execution_count": 2,
|
208 |
+
"metadata": {},
|
209 |
+
"outputs": [],
|
210 |
+
"source": [
|
211 |
+
"# data1 = read_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_logits_Img_20P.json')\n",
|
212 |
+
"data1 = read_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_logits_NoImg_20P.json')\n",
|
213 |
+
"data2 = read_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_rand_20P.json')\n",
|
214 |
+
"\n",
|
215 |
+
"\n"
|
216 |
+
]
|
217 |
+
},
|
218 |
+
{
|
219 |
+
"cell_type": "code",
|
220 |
+
"execution_count": 3,
|
221 |
+
"metadata": {},
|
222 |
+
"outputs": [],
|
223 |
+
"source": [
|
224 |
+
"new_data = data1 + data2"
|
225 |
+
]
|
226 |
+
},
|
227 |
+
{
|
228 |
+
"cell_type": "code",
|
229 |
+
"execution_count": 4,
|
230 |
+
"metadata": {},
|
231 |
+
"outputs": [],
|
232 |
+
"source": [
|
233 |
+
"# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_rand_logits_Img_40P.json',new_data)\n",
|
234 |
+
"write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_rand_logits_NoImg_40P.json',new_data)\n",
|
235 |
+
"\n",
|
236 |
+
"\n",
|
237 |
+
"\n",
|
238 |
+
"\n"
|
239 |
+
]
|
240 |
+
},
|
241 |
+
{
|
242 |
+
"cell_type": "code",
|
243 |
+
"execution_count": null,
|
244 |
+
"metadata": {},
|
245 |
+
"outputs": [],
|
246 |
+
"source": []
|
247 |
+
}
|
248 |
+
],
|
249 |
+
"metadata": {
|
250 |
+
"kernelspec": {
|
251 |
+
"display_name": "llava",
|
252 |
+
"language": "python",
|
253 |
+
"name": "python3"
|
254 |
+
},
|
255 |
+
"language_info": {
|
256 |
+
"codemirror_mode": {
|
257 |
+
"name": "ipython",
|
258 |
+
"version": 3
|
259 |
+
},
|
260 |
+
"file_extension": ".py",
|
261 |
+
"mimetype": "text/x-python",
|
262 |
+
"name": "python",
|
263 |
+
"nbconvert_exporter": "python",
|
264 |
+
"pygments_lexer": "ipython3",
|
265 |
+
"version": "3.10.16"
|
266 |
+
}
|
267 |
+
},
|
268 |
+
"nbformat": 4,
|
269 |
+
"nbformat_minor": 2
|
270 |
+
}
|