yonatanbitton committed · Commit 5abf748
1 Parent(s): 7278f86
Commit message: "commit"
Files changed:
- .idea/.gitignore +3 -0
- .idea/inspectionProfiles/Project_Default.xml +16 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +4 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- .idea/winoground_vq2.iml +8 -0
- BLIP2_Q2_CM.csv +0 -0
- app.py +119 -0
.idea/.gitignore
ADDED
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,16 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredPackages">
+        <value>
+          <list size="3">
+            <item index="0" class="java.lang.String" itemvalue="matplotlib" />
+            <item index="1" class="java.lang.String" itemvalue="CLIP" />
+            <item index="2" class="java.lang.String" itemvalue="transformers" />
+          </list>
+        </value>
+      </option>
+    </inspection_tool>
+  </profile>
+</component>
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/misc.xml
ADDED
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Pandas" project-jdk-type="Python SDK" />
+</project>
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/winoground_vq2.iml" filepath="$PROJECT_DIR$/.idea/winoground_vq2.iml" />
+    </modules>
+  </component>
+</project>
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
.idea/winoground_vq2.iml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
BLIP2_Q2_CM.csv
ADDED
The diff for this file is too large to render.
See raw diff
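The CSV itself is not rendered above, but app.py in this commit shows which columns it must provide: an id column matching Winoground example ids, eight per-pair score columns (c0_i0_blip2 through c1_i1_q2), and a JSON-encoded qa_samples column; app.py also drops a per-row "model" entry, so the raw file carries additional columns not listed here. Below is a minimal sketch of that consumption path, assuming only the columns app.py actually references; it is illustrative, not part of the commit.

import json
import pandas as pd

# Columns that app.py reads from BLIP2_Q2_CM.csv (the raw file may contain more).
SCORE_COLS = ['c0_i0_blip2', 'c0_i1_blip2', 'c1_i0_blip2', 'c1_i1_blip2',
              'c0_i0_q2', 'c1_i0_q2', 'c0_i1_q2', 'c1_i1_q2']

df = pd.read_csv('BLIP2_Q2_CM.csv')
missing = [c for c in ['id', 'qa_samples'] + SCORE_COLS if c not in df.columns]
if missing:
    raise ValueError(f"BLIP2_Q2_CM.csv is missing expected columns: {missing}")

df[SCORE_COLS] = df[SCORE_COLS].round(1)               # scores are displayed rounded to one decimal
df['qa_samples'] = df['qa_samples'].apply(json.loads)  # stored as a JSON string per row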
app.py
ADDED
@@ -0,0 +1,119 @@
+from datasets import load_dataset
+import pandas as pd
+import gradio as gr
+import os
+import random
+def list_to_string(lst):
+    return '\n'.join(['{}. {}'.format(i+1, item) for i, item in enumerate(lst)])
+
+auth_token = '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'
+winoground = load_dataset("facebook/winoground", use_auth_token=auth_token)["test"]
+df = pd.read_csv('BLIP2_Q2_CM.csv')
+winoground_pd = winoground.to_pandas()
+filter_id_set = set(df['id'])
+winoground_pd = winoground_pd[winoground_pd['id'].isin(filter_id_set)]
+for c in ['c0_i0_blip2', 'c0_i1_blip2', 'c1_i0_blip2', 'c1_i1_blip2', 'c0_i0_q2', 'c1_i0_q2', 'c0_i1_q2', 'c1_i1_q2']:
+    df[c] = df[c].apply(lambda x: round(x, 1))
+print("Load wino")
+import json
+df['qa_samples'] = df['qa_samples'].apply(json.loads)
+def func(index):
+    print(f"index: {index}")
+    example = winoground_pd.iloc[int(index)]
+    values = get_instance_values(example)
+    # print(f"index: {index}")
+    # return get_img(example["image_0"]), example["caption_0"], get_img(example["image_1"]), example["caption_1"]
+    # preds = ['yoyoyo']
+    return values
+
+demo = gr.Blocks()
+
+import PIL
+def process_key(item):
+    if 'path' in item:
+        return PIL.Image.open(item['path'])
+    return item
+
+def get_vals(items):
+    values = []
+    for idx, e in enumerate(items):
+        if idx == 0:
+            gr_val = gr.Image(value=e)
+        else:
+            gr_val = gr.Textbox(value=e)
+        values.append(gr_val)
+    return values
+
+def get_instance_values(example):
+    item_q2 = df[df['id'] == example['id']].iloc[0].to_dict()
+    left_keys = [x for x in example.keys() if '_0' in x]
+    right_keys = [x for x in example.keys() if '_1' in x]
+    left = [process_key(example[k]) for k in left_keys]
+    right = [process_key(example[k]) for k in right_keys]
+    # for k in left_side_columns + right_side_columns:
+    #     if k in enumerate_cols:
+    #         value = list_to_string(example[k])
+    #     elif k == QA:
+    #         qa_list = [f"Q: {x[0]} A: {x[1]}" for x in example[k]]
+    #         value = list_to_string(qa_list)
+    #     else:
+    #         value = example[k]
+    #     values.append(value)
+    blip_preds = {k.split("_blip2")[0]: v for k, v in item_q2.items() if "_blip" in k}
+    q2_preds = {k.split("_q2")[0]: v for k, v in item_q2.items() if "_q2" in k}
+    del blip_preds['model']
+    del q2_preds['model']
+    blip_preds_str = dict2string(blip_preds)
+    q2_preds_str = dict2string(q2_preds)
+    qas = pd.DataFrame(item_q2['qa_samples'])
+    qa_image_0 = qas[qas['img_caption'].apply(lambda x: 'image_0' in x)]
+    qa_image_1 = qas[qas['img_caption'].apply(lambda x: 'image_1' in x)]
+    qas_image_0_str = qa_image_0.to_string()
+    qas_image_1_str = qa_image_1.to_string()
+    return [blip_preds_str, q2_preds_str, qas_image_0_str, qas_image_1_str] + left + right
+
+def dict2string(d):
+    s = ''
+    for idx, (k, v) in enumerate(d.items()):
+        s += f"{k}: {v}"
+        if idx < len(d) - 1:
+            s += '\t|\t'
+    return s
+
+with demo:
+    gr.Markdown("# Slide across the slider to see various examples from WinoGround")
+    with gr.Column():
+        index = random.choice(range(0, len(winoground_pd)))
+        # index = 0
+        example = winoground_pd.iloc[index]
+        example_values = get_instance_values(example)
+        first_item = example_values[0]
+        second_item = example_values[1]
+        third_item = example_values[2]
+        fourth_item = example_values[3]
+        rest_values = example_values[4:]
+        item_q2 = df[df['id'] == example['id']].iloc[0]
+        slider = gr.Slider(minimum=0, maximum=len(winoground_pd), step=1)
+        preds_blip2 = gr.Textbox(value=first_item, label='BLIP2')
+        preds_q2 = gr.Textbox(value=second_item, label='Visual Q^2')
+        with gr.Row():
+            with gr.Column():
+                qas_image_0 = gr.Textbox(value=third_item, label='QA Pairs (Image 0)')
+            with gr.Column():
+                qas_image_1 = gr.Textbox(value=fourth_item, label='QA Pairs (Image 1)')
+        with gr.Row():
+            items_left = rest_values[:int(len(rest_values)/2)]
+            items_right = rest_values[int(len(rest_values)/2):]
+            with gr.Column():
+                # image_input_1 = gr.Image(value=get_img(winoground_pd.iloc[index]["image_0"]))
+                # text_input_1 = gr.Textbox(value=winoground_pd.iloc[index]["caption_0"])
+                items = items_left
+                gr_values_left = get_vals(items_left)
+            with gr.Column():
+                # image_input_2 = gr.Image(value=get_img(winoground_pd.iloc[index]["image_1"]))
+                # text_input_2 = gr.Textbox(value=winoground_pd.iloc[index]["caption_1"])
+                gr_values_right = get_vals(items_right)
+
+    slider.change(func, inputs=[slider], outputs=[preds_blip2, preds_q2, qas_image_0, qas_image_1] + gr_values_left + gr_values_right)
+
+demo.launch(auth=("admin", "visual_q2_secret"))
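Two editorial notes on the committed app.py, illustrated with a minimal sketch rather than the committed code. First, the Hugging Face access token and the Gradio login credentials are hardcoded in the source; reading them from environment variables keeps them out of the repository (the names HF_TOKEN, APP_USER, and APP_PASS below are assumptions, not part of the commit). Second, gr.Slider is created with maximum=len(winoground_pd), while pandas iloc accepts positions only up to len(winoground_pd) - 1, so the top slider value would raise an IndexError; clamping the index as in safe_lookup (a hypothetical helper), or setting maximum=len(winoground_pd) - 1, avoids that.

import os
from datasets import load_dataset

# Assumed environment variable name (not part of the commit); set it as a Space secret.
auth_token = os.environ.get("HF_TOKEN")
winoground_pd = load_dataset("facebook/winoground", use_auth_token=auth_token)["test"].to_pandas()

def safe_lookup(index):
    # iloc accepts positions 0..len-1, so clamp the slider value before indexing.
    pos = min(int(index), len(winoground_pd) - 1)
    return winoground_pd.iloc[pos]

# demo.launch(auth=(os.environ["APP_USER"], os.environ["APP_PASS"]))  # credentials from the environment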