Upload folder using huggingface_hub

- .gitattributes +1 -0
- Images.zip +3 -0
- OpenMMMedical.tsv +3 -0
- README.md +156 -1
- baichuan.py +178 -0
- image_mcq.py +1082 -0
.gitattributes
CHANGED

```diff
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+OpenMMMedical.tsv filter=lfs diff=lfs merge=lfs -text
```
Images.zip
ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bb52eba700efd6e0562af79a9ff1870a3f4cb4c0f28e3e4b4f9c00cd31b41a9
+size 10702783738
```
OpenMMMedical.tsv
ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c26f979cc2cc6539177e908ee55e05ffa1068404c3ae6c6c043c7553f73cb461
+size 20853275
```
README.md
CHANGED

@@ -1,3 +1,158 @@

---
---

# OpenMM-Medical

## Introduction

OpenMM-Medical is a comprehensive large-scale medical evaluation dataset that spans multiple domains, including Magnetic Resonance Imaging (MRI), CT scans, X-rays, microscopy images, endoscopy, fundus imaging, and dermoscopy.
OpenMM-Medical is an integration of existing datasets, comprising a total of 88,996 entries. It is designed to advance the development of multimodal medical large language models within the research community.

| Components | Content | Type | Number | Metrics |
| :----: | :----: | :----: | :----: | :----: |
| ACRIMA | Fundus Photography | Multiple Choice Question Answering | 159 | Acc |
| Adam Challenge | Endoscopy | Multiple Choice Question Answering | 87 | Acc |
| ALL Challenge | Microscopy Images | Multiple Choice Question Answering | 342 | Acc |
| BioMediTech | Microscopy Images | Multiple Choice Question Answering | 511 | Acc |
| Blood Cell | Microscopy Images | Multiple Choice Question Answering | 1175 | Acc |
| BreakHis | Magnetic Resonance Imaging | Multiple Choice Question Answering | 735 | Acc |
| Chest CT Scan | CT Imaging | Multiple Choice Question Answering | 871 | Acc |
| Chest X-Ray PA | X-Ray | Multiple Choice Question Answering | 850 | Acc |
| CoronaHack | X-Ray | Multiple Choice Question Answering | 684 | Acc |
| Covid CT | CT Imaging | Multiple Choice Question Answering | 199 | Acc |
| Covid-19 tianchi | X-Ray | Multiple Choice Question Answering | 96 | Acc |
| Covid19 heywhale | X-Ray | Multiple Choice Question Answering | 690 | Acc |
| COVIDx CXR-4 | X-Ray | Multiple Choice Question Answering | 485 | Acc |
| CRC100k | Magnetic Resonance Imaging | Multiple Choice Question Answering | 1322 | Acc |
| DeepDRiD | Fundus Photography | Multiple Choice Question Answering | 131 | Acc |
| Diabetic Retinopathy | Fundus Photography | Multiple Choice Question Answering | 2051 | Acc |
| DRIMDB | Fundus Photography | Multiple Choice Question Answering | 132 | Acc |
| Fitzpatrick 17k | Dermoscopy | Multiple Choice Question Answering | 1552 | Acc |
| HuSHeM | Microscopy Images | Multiple Choice Question Answering | 89 | Acc |
| ISBI2016 | Dermoscopy | Multiple Choice Question Answering | 681 | Acc |
| ISIC2018 | Dermoscopy | Multiple Choice Question Answering | 272 | Acc |
| ISIC2019 | Dermoscopy | Multiple Choice Question Answering | 1952 | Acc |
| ISIC2020 | Dermoscopy | Multiple Choice Question Answering | 1580 | Acc |
| JSIEC | Fundus Photography | Multiple Choice Question Answering | 220 | Acc |
| Knee Osteoarthritis | X-Ray | Multiple Choice Question Answering | 518 | Acc |
| MAlig Lymph | Magnetic Resonance Imaging | Multiple Choice Question Answering | 149 | Acc |
| MHSMA | Microscopy Images | Multiple Choice Question Answering | 1282 | Acc |
| MIAS | X-Ray | Multiple Choice Question Answering | 142 | Acc |
| Monkeypox Skin Image 2022 | Dermoscopy | Multiple Choice Question Answering | 163 | Acc |
| Mura | X-Ray | Multiple Choice Question Answering | 1464 | Acc |
| NLM-Malaria Data | Magnetic Resonance Imaging | Multiple Choice Question Answering | 75 | Acc |
| OCT & X-Ray 2017 | X-Ray, Optical Coherence Tomography | Multiple Choice Question Answering | 1301 | Acc |
| OLIVES | Fundus Photography | Multiple Choice Question Answering | 593 | Acc |
| PAD-UFES-20 | Dermoscopy | Multiple Choice Question Answering | 479 | Acc |
| PALM2019 | Fundus Photography | Multiple Choice Question Answering | 510 | Acc |
| Pulmonary Chest MC | X-Ray | Multiple Choice Question Answering | 38 | Acc |
| Pulmonary Chest Shenzhen | X-Ray | Multiple Choice Question Answering | 296 | Acc |
| RadImageNet | CT; Magnetic Resonance Imaging; Ultrasound | Multiple Choice Question Answering | 56697 | Acc |
| Retinal OCT-C8 | Optical Coherence Tomography | Multiple Choice Question Answering | 4016 | Acc |
| RUS CHN | X-Ray | Multiple Choice Question Answering | 1982 | Acc |
| SARS-CoV-2 CT-scan | CT | Multiple Choice Question Answering | 910 | Acc |
| Yangxi | Fundus Photography | Multiple Choice Question Answering | 1515 | Acc |
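
For a quick sanity check of the released `OpenMMMedical.tsv`, the file can be inspected directly with pandas. This is a minimal sketch, assuming the column layout produced by the `generate_tsv` helper shown in `image_mcq.py` further down this page (`index`, `dataset`, `question_id`, `question_type`, `question`, `A`–`E`, `answer`, `image_path`):

```python
# Minimal sketch: peek at the OpenMM-Medical TSV with pandas.
# Column names follow the fieldnames used by generate_tsv() in image_mcq.py below.
import pandas as pd

df = pd.read_csv('OpenMMMedical.tsv', sep='\t')
print(len(df))                       # total entries (88,996 per the introduction)
print(df['dataset'].value_counts())  # per-component counts, as in the table above
print(df.iloc[0][['question', 'A', 'B', 'answer', 'image_path']])
```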

## Usage

The following steps detail how to use [**Baichuan-Omni-1.5**](https://github.com/baichuan-inc/Baichuan-Omni-1.5) with OpenMM-Medical for evaluation using [**VLMEvalKit**](https://github.com/open-compass/VLMEvalKit):

---

### **1. Add `baichuan.py` in `VLMEvalKit/vlmeval/vlm`**

Download `baichuan.py` (which defines the `Baichuan` model class, reproduced in full at the end of this page) and place it in `VLMEvalKit/vlmeval/vlm`.

---

### **2. Modify `VLMEvalKit/vlmeval/vlm/__init__.py`**

Add the following line:
```python
from .baichuan import Baichuan
```

---

### **3. Modify `VLMEvalKit/vlmeval/config.py`**

Import the `Baichuan` model:
```python
from vlmeval.vlm import Baichuan
```

Add the `Baichuan-omni` model configuration:
```python
'Baichuan-omni': partial(
    Baichuan,
    sft=True,
    model_path='/your/path/to/the/model/checkpoint'
)
```
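
For context, `partial` in the snippet above is `functools.partial`, so the registry stores a factory rather than an instantiated model; construction is deferred until the runner selects `Baichuan-omni`. A minimal sketch of the same pattern (the registry dict is named `supported_VLM` in recent VLMEvalKit versions; check your checkout):

```python
# Sketch of the registration pattern used in vlmeval/config.py.
# functools.partial freezes the constructor arguments; calling the stored
# factory later instantiates the model on demand.
from functools import partial

from vlmeval.vlm import Baichuan

supported_VLM = {
    'Baichuan-omni': partial(
        Baichuan,
        sft=True,
        model_path='/your/path/to/the/model/checkpoint',  # placeholder path
    ),
}

# model = supported_VLM['Baichuan-omni']()  # instantiation happens here
```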

---

### **4. Modify `VLMEvalKit/vlmeval/dataset/image_mcq.py`**

Download `image_mcq.py` and add the following code to define the `OpenMMMedical` class. Ensure that `image_folder` points to your OpenMM-Medical dataset location:

```python
class OpenMMMedical(ImageMCQDataset):

    @classmethod
    def supported_datasets(cls):
        return ['OpenMMMedical']

    def load_data(self, dataset='OpenMMMedical'):
        image_folder = "/your/path/to/OpenMM_Medical"
        def generate_tsv(pth):
            import csv
            from pathlib import Path
            tsv_file_path = os.path.join(LMUDataRoot(), f'{dataset}.tsv')
            ...
```

The full implementation is included in `image_mcq.py` at the bottom of this page.
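
For reference, the `generate_tsv` helper walks `image_folder` for `*.json` files and converts each record into one TSV row. The record below is illustrative only; its keys mirror the `row[...]` accesses in the full implementation, while the values are made up:

```python
# One illustrative JSON record consumed by generate_tsv().
# The keys mirror the row[...] accesses in image_mcq.py; the values are examples.
record = {
    "dataset": "ACRIMA",
    "question_id": "acrima_0001",
    "question_type": "multiple-choice",
    "question": "What condition is shown in this fundus photograph?",
    "option_A": "Glaucoma",
    "option_B": "Normal",
    "gt_answer": "Glaucoma",                 # matched against option_* to derive the letter
    "image_path": "ACRIMA/images/0001.jpg",  # joined onto image_folder
}
```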

---

### **5. Update `VLMEvalKit/vlmeval/dataset/__init__.py`**

Import `OpenMMMedical`:
```python
from .image_mcq import (
    ImageMCQDataset, MMMUDataset, CustomMCQDataset,
    MUIRDataset, GMAIMMBenchDataset, MMERealWorld, OpenMMMedical
)

IMAGE_DATASET = [
    ImageCaptionDataset, ImageYORNDataset, ImageMCQDataset, ImageVQADataset,
    MathVision, MMMUDataset, OCRBench, MathVista, LLaVABench, MMVet,
    MTVQADataset, TableVQABench, MMLongBench, VCRDataset, MMDUDataset,
    DUDE, SlideVQA, MUIRDataset, GMAIMMBenchDataset, MMERealWorld, OpenMMMedical
]
```

---

### **6. Update `VLMEvalKit/vlmeval/dataset/image_base.py`**

Modify the `img_root_map` function:
```python
def img_root_map(dataset):
    if 'OpenMMMedical' in dataset:
        return 'OpenMMMedical'
    if 'OCRVQA' in dataset:
        return 'OCRVQA'
    if 'COCO_VAL' == dataset:
        return 'COCO'
    if 'MMMU' in dataset:
        return 'MMMU'
```
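
`img_root_map` only chooses the sub-folder name under the images root; with the branch above, images decoded from the TSV should land under the standard VLMEvalKit layout (a sketch, assuming the usual `LMUData` directory structure):

```python
# Sketch: where dumped images end up, assuming VLMEvalKit's standard layout
# <LMUDataRoot()>/images/<img_root_map(dataset)>/
import os.path as osp
from vlmeval.smp import LMUDataRoot

print(osp.join(LMUDataRoot(), 'images', 'OpenMMMedical'))
```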

---

### **7. Run the Evaluation**

Execute the following command to start the evaluation:
```bash
python run.py --data OpenMMMedical --model Baichuan-omni --verbose
```

---

### **Notes:**
- Ensure that all paths (e.g., `/your/path/to/OpenMM_Medical`) are correctly specified.
- Confirm that the Baichuan model checkpoint is accessible at the defined `model_path`.
- Validate the dependencies and configuration of VLMEvalKit to avoid runtime issues.

With this setup, you should be able to evaluate OpenMM-Medical using Baichuan-Omni successfully.
baichuan.py
ADDED

@@ -0,0 +1,178 @@

```python
import os

from .base import BaseModel
from ..smp import *
from ..dataset import DATASET_TYPE
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
import torch
import json

os.environ["TOKENIZERS_PARALLELISM"] = "false"


def load_model_tokenizer(checkpoint_path):
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint_path, trust_remote_code=True,
    )
    device_map = 'auto'
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_path,
        device_map=device_map,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )
    return model, tokenizer


class Baichuan(BaseModel):
    INSTALL_REQ = False
    INTERLEAVE = False

    def __init__(self, sft=True, model_path=None):
        assert model_path is not None
        self.device = "cuda"
        self.model_path = model_path

        self.model, self.tokenizer = load_model_tokenizer(model_path)
        self.model.bind_processor(self.tokenizer, training=False)

        torch.cuda.empty_cache()

        self.use_reserve_qa_prompt = sft
        self.reserve_qa_start_prompt = "<C_Q>"
        self.reserve_qa_end_prompt = "<C_A>"

        self.task_prompt = ""
        self.options_system_prompt = ('Carefully read the following question and select the letter corresponding '
                                      'to the correct answer. Highlight the applicable choices without giving '
                                      'explanations. ')
        self.wo_options_system_prompt = 'Carefully read the following question. Answer the question directly. '
        self.detail_system_prompt = 'Answer this question in detail and step by step. '
        self.vqa_prompt = 'Answer the question using a single word or phrase. '

    def generate_inner(self, message, dataset=None):
        image_str, question = '', ''
        for s in message:
            if s['type'] == 'image':
                # Values with a file-extension-like suffix are treated as local
                # paths; everything else is assumed to be base64-encoded image data.
                if len(s["value"].split(".")[-1]) > 2:
                    image_dict = {"local": s["value"]}
                else:
                    image_dict = {"base64": s["value"]}
                image_str += f"<img_start_baichuan>{json.dumps(image_dict)}<img_end_baichuan>\n"
            elif s['type'] == 'text':
                question += s['value']

        # sft version: <C_Q>...<C_A>
        if self.use_reserve_qa_prompt:
            prompt = "{}{}{}{}{}".format(
                self.reserve_qa_start_prompt, image_str, question, self.task_prompt, self.reserve_qa_end_prompt
            )
        else:
            prompt = "{}{}{}".format(image_str, question, self.task_prompt)

        print("****************************** prompt ******************************")
        print(prompt)
        print("********************************************************************")

        with torch.inference_mode():
            ret = self.model.processor(prompt)
            input_ids = ret.input_ids
            try:
                ret = self.model.generate(
                    inputs=torch.LongTensor([input_ids]).cuda(),
                    images=[torch.tensor(img, dtype=torch.float32).cuda() for img in ret.images] if ret.images is not None else None,
                    patch_nums=ret.patch_nums,
                    images_grid=ret.images_grid,
                    max_new_tokens=1024, do_sample=False, top_k=5, top_p=0.85, temperature=0,
                    num_return_sequences=1, repetition_penalty=1.05,
                    use_cache=False
                )
                # Strip the prompt tokens before decoding the generated continuation.
                ret = self.tokenizer.batch_decode(
                    ret[:, torch.LongTensor([input_ids]).to(self.device).shape[1]:], skip_special_tokens=True
                )[0].strip()
            except Exception as e:
                print(e)
                ret = ""

        response = ret

        print("=========================================== response ===========================================")
        print(f"\033[32m{response}\033[0m")
        print("================================================================================================")
        return response

    def use_custom_prompt(self, dataset):
        if dataset is not None and listinstr(['M3GIA'], dataset):
            return False
        if listinstr(['MCQ', 'VQA'], DATASET_TYPE(dataset)):
            return True
        elif dataset is not None and listinstr(['HallusionBench'], dataset):
            return True
        return False

    def build_prompt(self, line, dataset=None):
        if isinstance(line, int):
            line = self.data.iloc[line]

        tgt_path = self.dump_image(line, dataset)
        system_prompt = ''

        question = line['question']
        if DATASET_TYPE(dataset) == 'MCQ':
            options = {
                cand: line[cand]
                for cand in string.ascii_uppercase
                if cand in line and not pd.isna(line[cand])
            }
            options_prompt = 'Options:\n'
            for key, item in options.items():
                options_prompt += f'{key}. {item}\n'
            hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
            prompt = ''
            if hint is not None:
                prompt += f'Hint: {hint}\n'
            prompt += f'Question: {question}\n'
            if len(options):
                prompt += options_prompt
                if 'MMBench' in dataset:
                    prompt += 'Please select the correct answer from the options above. \n'
                else:
                    system_prompt = self.options_system_prompt + '\nPlease just indicate your choice.'
            else:
                system_prompt = self.wo_options_system_prompt
            if 'MMMU' in dataset:  # Corner Case
                prompt = system_prompt + '\n' + prompt
                system_prompt = ''
        elif dataset is not None and listinstr(['HallusionBench'], dataset):
            question = line['question'] + ' Yes or No?'
            prompt = question
        elif dataset is not None and listinstr(['MME'], dataset):
            question = line['question'] + ' Yes or No?'
            prompt = question
        elif dataset is not None and listinstr(['OCRBench'], dataset):
            system_prompt = self.vqa_prompt
            question = line['question']
            prompt = question
        elif DATASET_TYPE(dataset) == 'VQA':
            if listinstr(['LLaVABench', 'MMLongBench_DOC'], dataset):
                system_prompt = ''
                prompt = question
            elif listinstr(['MMVet'], dataset):
                system_prompt = self.detail_system_prompt
                prompt = question
            elif listinstr(['ChartQA'], dataset):
                system_prompt = 'Please answer the question using a single word. '
                prompt = question
            else:
                system_prompt = self.vqa_prompt
                prompt = question

        msgs = []
        if system_prompt:
            msgs.append(dict(type='text', value=system_prompt))
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            # Append (rather than reassign) so the system prompt collected above is kept.
            msgs.append(dict(type='image', value=tgt_path))
        msgs.append(dict(type='text', value=prompt))

        return msgs
```
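
A minimal sketch of driving this wrapper directly, outside the `run.py` entry point: the message format (a list of typed dicts) matches what `generate_inner` parses above, and the checkpoint and image paths are placeholders:

```python
# Sketch: call the Baichuan wrapper with a VLMEvalKit-style message.
# generate_inner() accepts dicts of type 'image' (local path or base64) and 'text'.
from vlmeval.vlm import Baichuan

model = Baichuan(sft=True, model_path='/your/path/to/the/model/checkpoint')
message = [
    dict(type='image', value='/path/to/fundus.jpg'),
    dict(type='text', value="Question: ...\nOptions:\nA. Glaucoma\nB. Normal\n"
                            "Answer with the option's letter from the given choices directly."),
]
print(model.generate_inner(message))
```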
image_mcq.py
ADDED
@@ -0,0 +1,1082 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import warnings
|
2 |
+
import json
|
3 |
+
from .image_base import ImageBaseDataset
|
4 |
+
from .utils import build_judge, DEBUG_MESSAGE
|
5 |
+
from ..smp import *
|
6 |
+
import pandas as pd
|
7 |
+
|
8 |
+
MMMB_URLS = {
|
9 |
+
'MMMB_ar': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_ar.tsv',
|
10 |
+
'MMMB_cn': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_cn.tsv',
|
11 |
+
'MMMB_en': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_en.tsv',
|
12 |
+
'MMMB_pt': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_pt.tsv',
|
13 |
+
'MMMB_ru': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_ru.tsv',
|
14 |
+
'MMMB_tr': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_tr.tsv',
|
15 |
+
}
|
16 |
+
|
17 |
+
MTL_MMBench_URLS = {
|
18 |
+
'MMBench_dev_ar': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_ar.tsv',
|
19 |
+
'MMBench_dev_cn': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_cn.tsv',
|
20 |
+
'MMBench_dev_en': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_en.tsv',
|
21 |
+
'MMBench_dev_pt': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_pt.tsv',
|
22 |
+
'MMBench_dev_tr': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_tr.tsv',
|
23 |
+
'MMBench_dev_ru': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_ru.tsv',
|
24 |
+
}
|
25 |
+
|
26 |
+
MMMB_MD5 = {
|
27 |
+
'MMMB_ar': 'f3a18b6385f1d9701840aa42de27aead', 'MMMB_cn': '13ed82fa89730037292fcaa27f08f430',
|
28 |
+
'MMMB_en': '1cd781a71ec5a2983c090b84105d6a01', 'MMMB_pt': '548ea2b3bb2da991790386f0015d30d1',
|
29 |
+
'MMMB_ru': 'ce1cc8a0533425ab0d86b326ebfc2984', 'MMMB_tr': '0733739d43090327975294292bc5cd67'
|
30 |
+
}
|
31 |
+
|
32 |
+
MTL_MMBench_MD5 = {
|
33 |
+
'MMBench_dev_ar': '4271b4a0d0200e1a86380a878e0d64a4', 'MMBench_dev_cn': '2ed5135326fed02c8e51ea50dda8222f',
|
34 |
+
'MMBench_dev_en': 'd9ab776fc018b3d45785e9a5c23431c2', 'MMBench_dev_pt': '4ddfbcd27ef12444b908c03831cd0295',
|
35 |
+
'MMBench_dev_tr': '4fab39d501389d3d6cc90264bb708f11', 'MMBench_dev_ru': '5ba1171ff2e68f80637bf78349e402a5'
|
36 |
+
}
|
37 |
+
|
38 |
+
|
39 |
+
class ImageMCQDataset(ImageBaseDataset):
|
40 |
+
|
41 |
+
TYPE = 'MCQ'
|
42 |
+
|
43 |
+
DATASET_URL = {
|
44 |
+
# MMBench v1.0
|
45 |
+
'MMBench_DEV_EN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_EN.tsv',
|
46 |
+
'MMBench_TEST_EN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_EN.tsv',
|
47 |
+
'MMBench_DEV_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_CN.tsv',
|
48 |
+
'MMBench_TEST_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_CN.tsv',
|
49 |
+
'MMBench': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench.tsv', # Internal
|
50 |
+
'MMBench_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_CN.tsv', # Internal
|
51 |
+
# MMBench v1.1
|
52 |
+
'MMBench_DEV_EN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_EN_V11.tsv',
|
53 |
+
'MMBench_TEST_EN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_EN_V11.tsv',
|
54 |
+
'MMBench_DEV_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_CN_V11.tsv',
|
55 |
+
'MMBench_TEST_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_CN_V11.tsv',
|
56 |
+
'MMBench_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_V11.tsv', # Internal
|
57 |
+
'MMBench_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_CN_V11.tsv', # Internal
|
58 |
+
# SEEDBench Series
|
59 |
+
'SEEDBench_IMG': 'https://opencompass.openxlab.space/utils/benchmarks/SEEDBench/SEEDBench_IMG.tsv',
|
60 |
+
'SEEDBench2': 'https://huggingface.co/datasets/VLMEval/SEEDBench2/resolve/main/SEEDBench2.tsv',
|
61 |
+
'SEEDBench2_Plus': 'https://opencompass.openxlab.space/utils/benchmarks/SEEDBench/SEEDBench2_Plus.tsv',
|
62 |
+
# ScienceQA Series
|
63 |
+
'ScienceQA_VAL': 'https://opencompass.openxlab.space/utils/benchmarks/ScienceQA/ScienceQA_VAL.tsv',
|
64 |
+
'ScienceQA_TEST': 'https://opencompass.openxlab.space/utils/benchmarks/ScienceQA/ScienceQA_TEST.tsv',
|
65 |
+
# MMT-Bench
|
66 |
+
'MMT-Bench_ALL_MI': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_ALL_MI.tsv',
|
67 |
+
'MMT-Bench_ALL': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_ALL.tsv',
|
68 |
+
'MMT-Bench_VAL_MI': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_VAL_MI.tsv',
|
69 |
+
'MMT-Bench_VAL': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_VAL.tsv',
|
70 |
+
# AesBench
|
71 |
+
'AesBench_VAL': 'https://huggingface.co/datasets/VLMEval/AesBench/resolve/main/AesBench_VAL.tsv',
|
72 |
+
'AesBench_TEST': 'https://huggingface.co/datasets/VLMEval/AesBench/resolve/main/AesBench_TEST.tsv',
|
73 |
+
# Q-Bench1
|
74 |
+
'Q-Bench1_VAL': 'https://huggingface.co/datasets/zhangzicheng/qbench_tsv/resolve/main/Q-Bench1_VAL.tsv',
|
75 |
+
'Q-Bench1_TEST': 'https://huggingface.co/datasets/zhangzicheng/qbench_tsv/resolve/main/Q-Bench1_TEST.tsv',
|
76 |
+
# A-Bench
|
77 |
+
'A-Bench_VAL': 'https://huggingface.co/datasets/zhangzicheng/abench_tsv/resolve/main/A-bench_VAL.tsv',
|
78 |
+
'A-Bench_TEST': 'https://huggingface.co/datasets/zhangzicheng/abench_tsv/resolve/main/A-bench_TEST.tsv',
|
79 |
+
# R-Bench
|
80 |
+
'R-Bench-Dis': 'https://huggingface.co/datasets/lcysyzxdxc/R-Bench/blob/main/R-bench-dis.tsv',
|
81 |
+
'R-Bench-Ref': 'https://huggingface.co/datasets/lcysyzxdxc/R-Bench/blob/main/R-bench-ref.tsv',
|
82 |
+
# Other Benchmarks
|
83 |
+
'CCBench': 'https://opencompass.openxlab.space/utils/VLMEval/CCBench.tsv',
|
84 |
+
'AI2D_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/AI2D_TEST.tsv',
|
85 |
+
'AI2D_TEST_NO_MASK': 'https://opencompass.openxlab.space/utils/VLMEval/AI2D_TEST_NO_MASK.tsv',
|
86 |
+
'MMStar': 'https://opencompass.openxlab.space/utils/VLMEval/MMStar.tsv',
|
87 |
+
'RealWorldQA': 'https://opencompass.openxlab.space/utils/VLMEval/RealWorldQA.tsv',
|
88 |
+
'MLLMGuard_DS': 'https://opencompass.openxlab.space/utils/VLMEval/MLLMGuard_DS.tsv',
|
89 |
+
'BLINK': 'https://opencompass.openxlab.space/utils/VLMEval/BLINK.tsv',
|
90 |
+
'TaskMeAnything_v1_imageqa_random': (
|
91 |
+
'https://huggingface.co/datasets/weikaih/TaskMeAnything-v1-imageqa-random/'
|
92 |
+
'resolve/main/TaskMeAnything-v1-imageqa-random.tsv'
|
93 |
+
),
|
94 |
+
'A-OKVQA': 'https://huggingface.co/datasets/Allen8/A-OKVQA/resolve/main/a-okvqa.tsv',
|
95 |
+
'WorldMedQA-V': 'https://opencompass.openxlab.space/utils/VLMEval/WorldMedQA-V.tsv',
|
96 |
+
'VisOnlyQA-VLMEvalKit': (
|
97 |
+
'https://huggingface.co/datasets/ryokamoi/VisOnlyQA_Eval_Real/'
|
98 |
+
'resolve/main/visonlyqa_vlmevalkit.tsv'
|
99 |
+
),
|
100 |
+
'3DSRBench': (
|
101 |
+
'https://huggingface.co/datasets/ccvl/3DSRBench/'
|
102 |
+
'resolve/main/3dsrbench_v1_vlmevalkit_circular.tsv'
|
103 |
+
),
|
104 |
+
}
|
105 |
+
|
106 |
+
DATASET_MD5 = {
|
107 |
+
# MMBench v1.0
|
108 |
+
'MMBench_DEV_EN': 'b6caf1133a01c6bb705cf753bb527ed8',
|
109 |
+
'MMBench_TEST_EN': '6939fadb0ce626fefc0bdc9c64efc528',
|
110 |
+
'MMBench_DEV_CN': '08b8fc3324a5ed74155350f57be69fbd',
|
111 |
+
'MMBench_TEST_CN': '7e1239baf0ee4c8b513e19705a0f317e',
|
112 |
+
'MMBench': '4115aea3383f3dd0083be6a633e0f820', # Internal Only
|
113 |
+
'MMBench_CN': '2e053ffc90ea598b1feae13c36dc13ee', # Internal Only
|
114 |
+
# MMBench v1.1
|
115 |
+
'MMBench_DEV_EN_V11': '30c05be8f2f347a50be25aa067248184',
|
116 |
+
'MMBench_TEST_EN_V11': '26f0f15381a21720255091d3e0316ce6',
|
117 |
+
'MMBench_DEV_CN_V11': '593f9b5f6bea453d870a798b34ae4f37',
|
118 |
+
'MMBench_TEST_CN_V11': '74bbe4556dac745613c7cbe5ad787050',
|
119 |
+
'MMBench_V11': 'b9276414f57af1308dcc4d0cd9b42e7c', # Internal Only
|
120 |
+
'MMBench_CN_V11': '95f6980dd1b4de38e3cbffe0305a3f25', # Internal Only
|
121 |
+
# SEEDBench
|
122 |
+
'SEEDBench_IMG': '68017231464752261a2526d6ca3a10c0',
|
123 |
+
'SEEDBench2': '4ec15cf864c4f16274112284f531813e',
|
124 |
+
'SEEDBench2_Plus': 'e32d3216dc4f452b0fe497a52015d1fd',
|
125 |
+
# ScienceQA
|
126 |
+
'ScienceQA_VAL': '96320d05e142e585e7204e72affd29f3',
|
127 |
+
'ScienceQA_TEST': 'e42e9e00f9c59a80d8a5db35bc32b71f',
|
128 |
+
# MMT-Bench
|
129 |
+
'MMT-Bench_ALL_MI': '5272157097e19cdd7cb41e412ab3b7c7',
|
130 |
+
'MMT-Bench_ALL': 'b273a2f4c596fe4f2605de0494cd632f',
|
131 |
+
'MMT-Bench_VAL_MI': 'c7d7b998eb5cd9aa36c7d4f721472462',
|
132 |
+
'MMT-Bench_VAL': '8dd4b730f53dbf9c3aed90ca31c928e0',
|
133 |
+
# AesBench
|
134 |
+
'AesBench_VAL': '3edb0c319e9187aa0b97fe7a11700a8c',
|
135 |
+
'AesBench_TEST': '58b1f7ba2cc32e1d68896d6ee716bbf8',
|
136 |
+
# Q-Bench1
|
137 |
+
'Q-Bench1_VAL': '837bdb6cd2da571713543462815187b7',
|
138 |
+
'Q-Bench1_TEST': '15e759bfd58c9d5f30b23a317d347153',
|
139 |
+
# A-Bench
|
140 |
+
'A-Bench_VAL': '218563ec50d34bb336c814143a5bb9c1',
|
141 |
+
'A-Bench_TEST': '567013fb033a20cf23f51d8e865bd16c',
|
142 |
+
# R-Bench
|
143 |
+
'R-Bench-Dis': 'd6e961dbfc43350688af2560226830b4',
|
144 |
+
'R-Bench-Ref': '270c1cb555acb523f3fdb178ed57021d',
|
145 |
+
# Other Benchmarks
|
146 |
+
'CCBench': 'f5dde47f24dc5a6fb6e595b409b466ac',
|
147 |
+
'AI2D_TEST': '0f593e0d1c7df9a3d69bf1f947e71975',
|
148 |
+
'AI2D_TEST_NO_MASK': 'fd8f463634d4fe9fbd23b876e8eea5be',
|
149 |
+
'MMStar': 'e1ecd2140806c1b1bbf54b43372efb9e',
|
150 |
+
'RealWorldQA': '4de008f55dc4fd008ca9e15321dc44b7',
|
151 |
+
'MLLMGuard_DS': '975fc0dd7119386e198c37d71e274b3f',
|
152 |
+
'BLINK': '3b6649b6a662184ea046908e5506260e',
|
153 |
+
'TaskMeAnything_v1_imageqa_random': '023fef69e2ca21827afb77c5ec3bc889',
|
154 |
+
'WorldMedQA-V': '441e63875e30c87f5750528b57b41285',
|
155 |
+
"VisOnlyQA-VLMEvalKit": 'cf460a31d2acb8d3a7cecd0e69298bfa',
|
156 |
+
'3DSRBench': '13a99f33164dc1b9faf0e8b8b01fd6f2',
|
157 |
+
}
|
158 |
+
|
159 |
+
DATASET_URL.update(MMMB_URLS)
|
160 |
+
DATASET_URL.update(MTL_MMBench_URLS)
|
161 |
+
DATASET_MD5.update(MMMB_MD5)
|
162 |
+
DATASET_MD5.update(MTL_MMBench_MD5)
|
163 |
+
|
164 |
+
def build_prompt(self, line):
|
165 |
+
|
166 |
+
if isinstance(line, int):
|
167 |
+
line = self.data.iloc[line]
|
168 |
+
|
169 |
+
if self.meta_only:
|
170 |
+
tgt_path = toliststr(line['image_path'])
|
171 |
+
else:
|
172 |
+
tgt_path = self.dump_image(line)
|
173 |
+
|
174 |
+
question = line['question']
|
175 |
+
options = {
|
176 |
+
cand: line[cand]
|
177 |
+
for cand in string.ascii_uppercase
|
178 |
+
if cand in line and not pd.isna(line[cand])
|
179 |
+
}
|
180 |
+
options_prompt = 'Options:\n'
|
181 |
+
for key, item in options.items():
|
182 |
+
options_prompt += f'{key}. {item}\n'
|
183 |
+
hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
|
184 |
+
prompt = ''
|
185 |
+
if hint is not None:
|
186 |
+
prompt += f'Hint: {hint}\n'
|
187 |
+
prompt += f'Question: {question}\n'
|
188 |
+
if len(options):
|
189 |
+
prompt += options_prompt
|
190 |
+
prompt += 'Please select the correct answer from the options above. \n'
|
191 |
+
|
192 |
+
msgs = []
|
193 |
+
if isinstance(tgt_path, list):
|
194 |
+
msgs.extend([dict(type='image', value=p) for p in tgt_path])
|
195 |
+
else:
|
196 |
+
msgs = [dict(type='image', value=tgt_path)]
|
197 |
+
msgs.append(dict(type='text', value=prompt))
|
198 |
+
|
199 |
+
return msgs
|
200 |
+
|
201 |
+
def evaluate(self, eval_file, **judge_kwargs):
|
202 |
+
from .utils.multiple_choice import report_acc, report_acc_MMT, mcq_circular_eval, mcq_vanilla_eval
|
203 |
+
# assert dataset is not None
|
204 |
+
dataset_map = {
|
205 |
+
'MMBench_TEST_EN': 'MMBench', 'MMBench_TEST_EN_V11': 'MMBench_V11',
|
206 |
+
'MMBench_TEST_CN': 'MMBench_CN', 'MMBench_TEST_CN_V11': 'MMBench_CN_V11'
|
207 |
+
}
|
208 |
+
dataset = self.dataset_name
|
209 |
+
if dataset in dataset_map:
|
210 |
+
dataset = dataset_map[dataset]
|
211 |
+
nproc = judge_kwargs.pop('nproc', 4)
|
212 |
+
|
213 |
+
circular = False
|
214 |
+
if listinstr(['mmbench', 'ccbench'], dataset.lower()):
|
215 |
+
data = load(eval_file)
|
216 |
+
data['index'] = [int(x) for x in data['index']]
|
217 |
+
dump(data, eval_file)
|
218 |
+
circular = True
|
219 |
+
|
220 |
+
suffix = eval_file.split('.')[-1]
|
221 |
+
model = judge_kwargs.get('model', 'exact_matching')
|
222 |
+
assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
|
223 |
+
name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
|
224 |
+
name_str = name_str_map[model] if model in name_str_map else model
|
225 |
+
|
226 |
+
if model == 'exact_matching':
|
227 |
+
model = None
|
228 |
+
elif gpt_key_set():
|
229 |
+
model = build_judge(**judge_kwargs)
|
230 |
+
if not model.working():
|
231 |
+
warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
|
232 |
+
warnings.warn(DEBUG_MESSAGE)
|
233 |
+
model = None
|
234 |
+
else:
|
235 |
+
warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
|
236 |
+
model = None
|
237 |
+
|
238 |
+
result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')
|
239 |
+
|
240 |
+
data = load(eval_file)
|
241 |
+
data = data.sort_values(by='index')
|
242 |
+
data['prediction'] = [str(x) for x in data['prediction']]
|
243 |
+
# If not choice label, then use lower case
|
244 |
+
for k in data.keys():
|
245 |
+
data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)
|
246 |
+
|
247 |
+
meta = self.data
|
248 |
+
meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
|
249 |
+
data_map = {x: y for x, y in zip(data['index'], data['question'])}
|
250 |
+
for k in data_map:
|
251 |
+
assert k in meta_q_map, (
|
252 |
+
f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
|
253 |
+
)
|
254 |
+
|
255 |
+
if circular:
|
256 |
+
data = mcq_circular_eval(model, data, meta, nproc, result_file, self.dataset_name)
|
257 |
+
else:
|
258 |
+
data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)
|
259 |
+
|
260 |
+
# load split
|
261 |
+
dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
|
262 |
+
data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
|
263 |
+
|
264 |
+
# May have different report acc functions for different datasets
|
265 |
+
if 'MMT' in dataset:
|
266 |
+
acc = report_acc_MMT(data)
|
267 |
+
else:
|
268 |
+
acc = report_acc(data)
|
269 |
+
|
270 |
+
score_file = eval_file.replace(f'.{suffix}', '_acc.csv')
|
271 |
+
dump(acc, score_file)
|
272 |
+
|
273 |
+
if dataset == 'AesBench_VAL':
|
274 |
+
warnings.warn('Note that AesBench VAL is just a toy version of AesBench TEST. For full results, \
|
275 |
+
please evaluate on AesBench TEST. The AesBench TEST dataset is more than 20 times \
|
276 |
+
larger than the VAL dataset and the leaderboard results are based on AesBench TEST.')
|
277 |
+
if dataset == 'VisOnlyQA-VLMEvalKit':
|
278 |
+
warnings.warn('Note that the results on VisOnlyQA-VLMEvalKit are different from the results on \
|
279 |
+
the original VisOnlyQA. VisOnlyQA-VLMEvalKit does not include the \
|
280 |
+
chemistry__shape_multi split and uses a different evaluation prompt. Please \
|
281 |
+
explicitly specify the version of the dataset when you report results.')
|
282 |
+
|
283 |
+
return acc
|
284 |
+
|
285 |
+
|
286 |
+
class OpenMMMedical(ImageMCQDataset):
|
287 |
+
@classmethod
|
288 |
+
def supported_datasets(cls):
|
289 |
+
return ['OpenMMMedical']
|
290 |
+
|
291 |
+
def load_data(self, dataset='OpenMMMedical'):
|
292 |
+
image_folder = "/your/path/to/OpenMM_Medical"
|
293 |
+
def generate_tsv(pth):
|
294 |
+
import csv
|
295 |
+
from pathlib import Path
|
296 |
+
tsv_file_path = os.path.join(LMUDataRoot(), f'{dataset}.tsv')
|
297 |
+
|
298 |
+
if os.path.exists(tsv_file_path):
|
299 |
+
print(f'{tsv_file_path} already exists.')
|
300 |
+
return
|
301 |
+
|
302 |
+
path = Path(pth)
|
303 |
+
json_files = [str(f) for f in path.rglob('*.json')]
|
304 |
+
fieldnames = ["index", "dataset", "question_id", "question_type", "question", "A", "B", "C", "D", "E", "answer", "image_path"]
|
305 |
+
index = 0
|
306 |
+
with open(tsv_file_path, 'w', encoding='utf-8', newline='') as tsv_file:
|
307 |
+
writer = csv.DictWriter(tsv_file, fieldnames=fieldnames, delimiter='\t')
|
308 |
+
writer.writeheader()
|
309 |
+
for json_file in json_files:
|
310 |
+
data_name = json_file.split('/')[-1].split('.')[0]
|
311 |
+
with open(json_file, 'r', encoding='utf-8') as f:
|
312 |
+
data = json.load(f)
|
313 |
+
for row in data:
|
314 |
+
line = {}
|
315 |
+
line['index'] = index
|
316 |
+
line['dataset'] = row['dataset']
|
317 |
+
line['question_id'] = row['question_id']
|
318 |
+
line['question_type'] = row['question_type']
|
319 |
+
line['question'] = row['question']
|
320 |
+
choices_letter = ["A", "B", "C", "D", "E"]
|
321 |
+
for i in range(len(choices_letter)):
|
322 |
+
if f"option_{choices_letter[i]}" in row:
|
323 |
+
line[choices_letter[i]] = row[f"option_{choices_letter[i]}"]
|
324 |
+
if row[f"option_{choices_letter[i]}"] == row['gt_answer']:
|
325 |
+
line['answer'] = choices_letter[i]
|
326 |
+
else:
|
327 |
+
break
|
328 |
+
line['image_path'] = os.path.join(image_folder, row['image_path'])
|
329 |
+
index += 1
|
330 |
+
writer.writerow(line)
|
331 |
+
print(f'TSV file saved to {tsv_file_path}')
|
332 |
+
|
333 |
+
generate_tsv(image_folder)
|
334 |
+
update_flag = True
|
335 |
+
|
336 |
+
data_path = os.path.join(LMUDataRoot(), f'{dataset}.tsv')
|
337 |
+
if file_size(data_path, 'GB') > 1:
|
338 |
+
local_path = data_path.replace('.tsv', '_local.tsv')
|
339 |
+
if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None) or update_flag:
|
340 |
+
from vlmeval.tools import LOCALIZE
|
341 |
+
LOCALIZE(data_path, local_path)
|
342 |
+
data_path = local_path
|
343 |
+
return load(data_path)
|
344 |
+
|
345 |
+
# Given one data record, return the built prompt (a multi-modal message), can override
|
346 |
+
def build_prompt(self, line):
|
347 |
+
if isinstance(line, int):
|
348 |
+
line = self.data.iloc[line]
|
349 |
+
|
350 |
+
if self.meta_only:
|
351 |
+
tgt_path = toliststr(line['image_path'])
|
352 |
+
else:
|
353 |
+
tgt_path = self.dump_image(line)
|
354 |
+
|
355 |
+
question = line['question']
|
356 |
+
options = {
|
357 |
+
cand: line[cand]
|
358 |
+
for cand in string.ascii_uppercase
|
359 |
+
if cand in line and not pd.isna(line[cand])
|
360 |
+
}
|
361 |
+
options_prompt = 'Options:\n'
|
362 |
+
for key, item in options.items():
|
363 |
+
options_prompt += f'{key}. {item}\n'
|
364 |
+
hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
|
365 |
+
prompt = ''
|
366 |
+
if hint is not None:
|
367 |
+
prompt += f'Hint: {hint}\n'
|
368 |
+
prompt += f'Question: {question}\n'
|
369 |
+
prompt += options_prompt
|
370 |
+
prompt += "Answer with the option's letter from the given choices directly.\n"
|
371 |
+
# prompt += "Please select the correct answer from the options above. \n"
|
372 |
+
|
373 |
+
msgs = []
|
374 |
+
if tgt_path:
|
375 |
+
if isinstance(tgt_path, list):
|
376 |
+
msgs.extend([dict(type='image', value=p) for p in tgt_path])
|
377 |
+
else:
|
378 |
+
msgs = [dict(type='image', value=tgt_path)]
|
379 |
+
msgs.append(dict(type='text', value=prompt))
|
380 |
+
return msgs
|
381 |
+
|
382 |
+
def report_acc_by_groups(self, df, group_column):
|
383 |
+
res = defaultdict(list)
|
384 |
+
|
385 |
+
# Check for the 'split' column
|
386 |
+
if 'split' in df:
|
387 |
+
splits = list(set(df['split']))
|
388 |
+
res['split'] = splits
|
389 |
+
else:
|
390 |
+
df['split'] = ['none'] * len(df)
|
391 |
+
res['split'] = ['none']
|
392 |
+
|
393 |
+
res['Overall'] = [np.mean(df[df['split'] == sp]['hit']) for sp in res['split']]
|
394 |
+
|
395 |
+
if group_column not in df:
|
396 |
+
raise ValueError(f"Column '{group_column}' not found in dataframe.") # noqa: E713
|
397 |
+
|
398 |
+
abilities = list(set(df[group_column]))
|
399 |
+
abilities = ['None' if isinstance(ab, float) and pd.isna(ab) else ab for ab in abilities]
|
400 |
+
abilities.sort()
|
401 |
+
|
402 |
+
for ab in abilities:
|
403 |
+
ab_name = ab
|
404 |
+
sub_df = df[df[group_column] == ab]
|
405 |
+
res[ab_name] = [np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']]
|
406 |
+
|
407 |
+
return pd.DataFrame(res)
|
408 |
+
|
409 |
+
def evaluate(self, eval_file, **judge_kwargs):
|
410 |
+
from .utils.multiple_choice import report_acc, mcq_vanilla_eval
|
411 |
+
nproc = judge_kwargs.pop('nproc', 4)
|
412 |
+
|
413 |
+
suffix = eval_file.split('.')[-1]
|
414 |
+
model = judge_kwargs.get('model', 'exact_matching')
|
415 |
+
assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125', 'gpt-4o']
|
416 |
+
name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4', 'gpt-4o': 'gpt4o'}
|
417 |
+
name_str = name_str_map[model] if model in name_str_map else model
|
418 |
+
|
419 |
+
if model == 'exact_matching':
|
420 |
+
model = None
|
421 |
+
elif gpt_key_set():
|
422 |
+
model = build_judge(**judge_kwargs)
|
423 |
+
if not model.working():
|
424 |
+
warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
|
425 |
+
warnings.warn(DEBUG_MESSAGE)
|
426 |
+
model = None
|
427 |
+
else:
|
428 |
+
warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
|
429 |
+
model = None
|
430 |
+
|
431 |
+
result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')
|
432 |
+
|
433 |
+
data = load(eval_file)
|
434 |
+
data = data.sort_values(by='index')
|
435 |
+
data['prediction'] = [str(x) for x in data['prediction']]
|
436 |
+
# If not choice label, then use lower case
|
437 |
+
for k in data.keys():
|
438 |
+
data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)
|
439 |
+
|
440 |
+
meta = self.data
|
441 |
+
meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
|
442 |
+
data_map = {x: y for x, y in zip(data['index'], data['question'])}
|
443 |
+
for k in data_map:
|
444 |
+
assert k in meta_q_map, (
|
445 |
+
f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
|
446 |
+
)
|
447 |
+
|
448 |
+
data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)
|
449 |
+
|
450 |
+
# load split
|
451 |
+
dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
|
452 |
+
data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
|
453 |
+
|
454 |
+
acc = report_acc(data)
|
455 |
+
|
456 |
+
for group_col in ['dataset']:
|
457 |
+
acc_grouped = self.report_acc_by_groups(data, group_col)
|
458 |
+
score_file_grouped = eval_file.replace(f'.{suffix}', f'_{group_col}_acc.csv')
|
459 |
+
dump(acc_grouped, score_file_grouped)
|
460 |
+
|
461 |
+
return acc
|
462 |
+
|
463 |
+
|
464 |
+
class MMMUDataset(ImageMCQDataset):
|
465 |
+
|
466 |
+
DATASET_URL = {
|
467 |
+
'MMMU_DEV_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_DEV_VAL.tsv',
|
468 |
+
'MMMU_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_TEST.tsv',
|
469 |
+
}
|
470 |
+
|
471 |
+
DATASET_MD5 = {
|
472 |
+
'MMMU_DEV_VAL': '585e8ad75e73f75dcad265dfd0417d64',
|
473 |
+
'MMMU_TEST': 'c19875d11a2d348d07e5eb4bdf33166d',
|
474 |
+
}
|
475 |
+
|
476 |
+
@staticmethod
|
477 |
+
def split_MMMU(msgs):
|
478 |
+
text, images = None, []
|
479 |
+
for s in msgs:
|
480 |
+
if s['type'] == 'image':
|
481 |
+
images.append(s['value'])
|
482 |
+
elif s['type'] == 'text':
|
483 |
+
assert text is None
|
484 |
+
text = s['value']
|
485 |
+
text_segs = text.split('<image ')
|
486 |
+
if len(text_segs) == 1:
|
487 |
+
return msgs
|
488 |
+
|
489 |
+
segs = [dict(type='text', value=text_segs[0])]
|
490 |
+
for i, seg in enumerate(text_segs):
|
491 |
+
if i == 0:
|
492 |
+
continue
|
493 |
+
assert istype(seg[0], int) and seg[1] == '>'
|
494 |
+
image_idx = int(seg[0]) - 1
|
495 |
+
segs.append(dict(type='image', value=images[image_idx]))
|
496 |
+
segs.append(dict(type='text', value=seg[2:]))
|
497 |
+
return segs
|
498 |
+
|
499 |
+
def build_prompt(self, line):
|
500 |
+
msgs = super().build_prompt(line)
|
501 |
+
msgs = self.split_MMMU(msgs)
|
502 |
+
return msgs
|
503 |
+
|
504 |
+
|
505 |
+
class MUIRDataset(ImageMCQDataset):
|
506 |
+
|
507 |
+
DATASET_URL = {
|
508 |
+
'MUIRBench': 'http://opencompass.openxxlab.com/utils/VLMEval/MUIRBench.tsv'
|
509 |
+
}
|
510 |
+
|
511 |
+
DATASET_MD5 = {
|
512 |
+
'MUIRBench': '2e5e6fd7699761b08a7cb3ab8c0c2ec8'
|
513 |
+
}
|
514 |
+
|
515 |
+
@staticmethod
|
516 |
+
def split_MUIR(msgs):
|
517 |
+
text, images = None, []
|
518 |
+
|
519 |
+
# Separate images and text from msgs
|
520 |
+
for s in msgs:
|
521 |
+
if s['type'] == 'image':
|
522 |
+
images.append(s['value'])
|
523 |
+
elif s['type'] == 'text':
|
524 |
+
assert text is None # Ensure only one text entry is expected
|
525 |
+
text = s['value']
|
526 |
+
|
527 |
+
# Split text by <image> tags
|
528 |
+
text_segs = text.split('<image>')
|
529 |
+
|
530 |
+
# Initialize the segments list
|
531 |
+
segs = []
|
532 |
+
|
533 |
+
# Iterate through the text segments and images
|
534 |
+
for i, seg in enumerate(text_segs):
|
535 |
+
# Append the image if this is not the first segment and there are still images left
|
536 |
+
if i > 0 and i - 1 < len(images):
|
537 |
+
segs.append(dict(type='image', value=images[i - 1]))
|
538 |
+
# Append the text segment (if it's non-empty)
|
539 |
+
if len(seg) > 0:
|
540 |
+
segs.append(dict(type='text', value=seg))
|
541 |
+
|
542 |
+
return segs
|
543 |
+
|
544 |
+
def build_prompt(self, line):
|
545 |
+
|
546 |
+
if isinstance(line, int):
|
547 |
+
line = self.data.iloc[line]
|
548 |
+
|
549 |
+
if self.meta_only:
|
550 |
+
tgt_path = toliststr(line['image_path'])
|
551 |
+
else:
|
552 |
+
tgt_path = self.dump_image(line)
|
553 |
+
|
554 |
+
question = line['question']
|
555 |
+
options = {
|
556 |
+
cand: line[cand]
|
557 |
+
for cand in string.ascii_uppercase
|
558 |
+
if cand in line and not pd.isna(line[cand])
|
559 |
+
}
|
560 |
+
# options_prompt = ''
|
561 |
+
options_prompt = '\n'.join([f'{key}. {item}' for key, item in options.items()])
|
562 |
+
# for key, item in options.items():
|
563 |
+
# options_prompt += f'{key}. {item}\n'
|
564 |
+
|
565 |
+
prompt = ''
|
566 |
+
|
567 |
+
prompt += f'{question}\n'
|
568 |
+
if len(options):
|
569 |
+
prompt += options_prompt
|
570 |
+
prompt += "\nAnswer with the option's letter from the given choices directly."
|
571 |
+
|
572 |
+
msgs = []
|
573 |
+
if isinstance(tgt_path, list):
|
574 |
+
msgs.extend([dict(type='image', value=p) for p in tgt_path])
|
575 |
+
else:
|
576 |
+
msgs = [dict(type='image', value=tgt_path)]
|
577 |
+
msgs.append(dict(type='text', value=prompt))
|
578 |
+
|
579 |
+
msgs = self.split_MUIR(msgs)
|
580 |
+
return msgs
|
581 |
+
|
582 |
+
|
583 |
+
class GMAIMMBenchDataset(ImageMCQDataset):
|
584 |
+
|
585 |
+
DATASET_URL = {
|
586 |
+
'GMAI-MMBench_VAL': 'https://huggingface.co/datasets/VLMEval/GMAI-MMBench/resolve/main/GMAI-MMBench_VAL.tsv',
|
587 |
+
'GMAI_mm_bench_TEST_part_1': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_1.tsv', # noqa: E501
|
588 |
+
'GMAI_mm_bench_TEST_part_2': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_2.tsv', # noqa: E501
|
589 |
+
'GMAI_mm_bench_TEST_part_3': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_3.tsv', # noqa: E501
|
590 |
+
'GMAI_mm_bench_TEST_part_4': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_4.tsv', # noqa: E501
|
591 |
+
'GMAI_mm_bench_TEST_part_5': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_5.tsv', # noqa: E501
|
592 |
+
'GMAI_mm_bench_TEST_part_6': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_6.tsv', # noqa: E501
|
593 |
+
'GMAI_mm_bench_TEST_part_7': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_7.tsv', # noqa: E501
|
594 |
+
'GMAI_mm_bench_TEST_part_8': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_8.tsv', # noqa: E501
|
595 |
+
'GMAI_mm_bench_TEST_part_9': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_9.tsv', # noqa: E501
|
596 |
+
'GMAI_mm_bench_TEST_part_10': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_10.tsv', # noqa: E501
|
597 |
+
'GMAI_mm_bench_TEST_part_11': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_11.tsv', # noqa: E501
|
598 |
+
}
|
599 |
+
|
600 |
+
DATASET_MD5 = {
|
601 |
+
'GMAI-MMBench_VAL': '254bd581627866f1c499d3d6b4422324',
|
602 |
+
'GMAI_mm_bench_TEST_part_1': '900d735231230a63f4ed45665c078ef4',
|
603 |
+
'GMAI_mm_bench_TEST_part_2': '1b27ab621386945d7e4a765ad2d22b0e',
|
604 |
+
'GMAI_mm_bench_TEST_part_3': '44bdc2b6267dd505d529b8cad06f0fb2',
|
605 |
+
'GMAI_mm_bench_TEST_part_4': '5a04a04fcac9f1466709f242fdb80acb',
|
606 |
+
'GMAI_mm_bench_TEST_part_5': 'c70baf8909eda9af0ddeab275c721336',
|
607 |
+
'GMAI_mm_bench_TEST_part_6': '825abc39596b644dead9350d0cfa3b96',
|
608 |
+
'GMAI_mm_bench_TEST_part_7': 'defb8aed2fb77365a76b6b9abd6a2701',
|
609 |
+
'GMAI_mm_bench_TEST_part_8': 'ff490d60b85f2bb0abb67a435b298c65',
|
610 |
+
'GMAI_mm_bench_TEST_part_9': 'ff67c86f40da93b09139ac1d1ba5dc6b',
|
611 |
+
'GMAI_mm_bench_TEST_part_10': '3dae94627b9ac0fe00180d4780fbf6dc',
|
612 |
+
'GMAI_mm_bench_TEST_part_11': 'd08dc813f0eb6bbab63cae2a9d113c4b',
|
613 |
+
}
|
614 |
+
|
615 |
+
@classmethod
|
616 |
+
def supported_datasets(cls):
|
617 |
+
return ['GMAI-MMBench_VAL', 'GMAI-MMBench_TEST']
|
618 |
+
|
619 |
+
def load_data(self, dataset):
|
620 |
+
if dataset == 'GMAI-MMBench_VAL':
|
621 |
+
data_path = osp.join(LMUDataRoot(), f'{dataset}.tsv')
|
622 |
+
if file_size(data_path, 'GB') > 1:
|
623 |
+
local_path = data_path.replace('.tsv', '_local.tsv')
|
624 |
+
if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'):
|
625 |
+
from ..tools import LOCALIZE
|
626 |
+
LOCALIZE(data_path, local_path)
|
627 |
+
data_path = local_path
|
628 |
+
return load(data_path)
|
629 |
+
elif dataset == 'GMAI-MMBench_TEST':
|
630 |
+
dfs = []
|
631 |
+
for part_num in range(1, 12):
|
632 |
+
part_name = f'GMAI_mm_bench_TEST_part_{part_num}'
|
633 |
+
url = self.DATASET_URL[part_name]
|
634 |
+
file_md5 = self.DATASET_MD5.get(part_name)
|
635 |
+
tsv_path = osp.join(LMUDataRoot(), f'{part_name}.tsv')
|
636 |
+
if not osp.exists(tsv_path) or (file_md5 and md5(tsv_path) != file_md5):
|
637 |
+
download_file(url, filename=tsv_path)
|
638 |
+
local_path = tsv_path.replace('.tsv', '_local.tsv')
|
639 |
+
if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'):
|
640 |
+
from ..tools import LOCALIZE
|
641 |
+
LOCALIZE(tsv_path, local_path)
|
642 |
+
tsv_path = local_path
|
643 |
+
# 加载数据
|
644 |
+
df = load(tsv_path)
|
645 |
+
dfs.append(df)
|
646 |
+
# 合并所有数据
|
647 |
+
data = pd.concat(dfs, ignore_index=True)
|
648 |
+
return data
|
649 |
+
else:
|
650 |
+
raise ValueError(f"未知的数据集:{dataset}")
|
651 |
+
|
652 |
+
def report_acc_by_groups(self, df, group_column):
|
653 |
+
res = defaultdict(list)
|
654 |
+
|
655 |
+
# Check for the 'split' column
|
656 |
+
if 'split' in df:
|
657 |
+
splits = list(set(df['split']))
|
658 |
+
res['split'] = splits
|
659 |
+
else:
|
660 |
+
df['split'] = ['none'] * len(df)
|
661 |
+
res['split'] = ['none']
|
662 |
+
|
663 |
+
res['Overall'] = [np.mean(df[df['split'] == sp]['hit']) for sp in res['split']]
|
664 |
+
|
665 |
+
if group_column not in df:
|
666 |
+
raise ValueError(f"Column '{group_column}' not found in dataframe.") # noqa: E713
|
667 |
+
|
668 |
+
abilities = list(set(df[group_column]))
|
669 |
+
abilities = ['None' if isinstance(ab, float) and pd.isna(ab) else ab for ab in abilities]
|
670 |
+
abilities.sort()
|
671 |
+
|
672 |
+
for ab in abilities:
|
673 |
+
ab_name = ab
|
674 |
+
sub_df = df[df[group_column] == ab]
|
675 |
+
res[ab_name] = [np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']]
|
676 |
+
|
677 |
+
return pd.DataFrame(res)
|
678 |
+
|
679 |
+
    def evaluate(self, eval_file, **judge_kwargs):
        from .utils.multiple_choice import report_acc, mcq_vanilla_eval
        nproc = judge_kwargs.pop('nproc', 4)

        suffix = eval_file.split('.')[-1]
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # Lower-case all column names except single-letter choice labels
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
        data_map = {x: y for x, y in zip(data['index'], data['question'])}
        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )

        data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)

        # Dump the judged records, then reload them for reporting
        dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
        data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))

        acc = report_acc(data)

        # Per-group accuracy breakdowns for the medical benchmark
        for group_col in ['clinical vqa task', 'department', 'perceptual granularity']:
            acc_grouped = self.report_acc_by_groups(data, group_col)
            score_file_grouped = eval_file.replace(f'.{suffix}', f'_{group_col}_acc.csv')
            dump(acc_grouped, score_file_grouped)

        return acc

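# Judge-selection sketch (illustrative; 'dataset' is a hypothetical instance):
# model='exact_matching' scores answers by rule-based matching, while a ChatGPT
# judge is built only when an OpenAI key is configured and working.
#
#     acc = dataset.evaluate('predictions.xlsx')                                # exact matching
#     acc = dataset.evaluate('predictions.xlsx', model='chatgpt-0125', nproc=8)  # GPT judge
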
class MMERealWorld(ImageMCQDataset):

    TYPE = 'MMERealWorld'

    DATASET_MD5 = {
        'MME-RealWorld': '271c33ec814c39533c467ec6fb8a6f36',
        'MME-RealWorld-Lite': '4c17057d7d3b6c4a0d4397c3dae0881c',
        'MME-RealWorld-CN': 'daaa763d52a760a38606d5dedb3fe444',
    }
    SYS = {
        'MME-RealWorld': (
            'Select the best answer to the above multiple-choice question based on the image. '
            'Respond with only the letter (A, B, C, D, or E) of the correct option. \n'
            'The best answer is:'
        ),
        'MME-RealWorld-Lite': (
            'Select the best answer to the above multiple-choice question based on the image. '
            'Respond with only the letter (A, B, C, D, or E) of the correct option. \n'
            'The best answer is:'
        ),
        'MME-RealWorld-CN': (
            '根据图像选择上述多项选择题的最佳答案。只需回答正确选项的字母(A, B, C, D 或 E)。\n'
            '最佳答案为:'
        ),
    }

    @classmethod
    def supported_datasets(cls):
        return ['MME-RealWorld', 'MME-RealWorld-CN', 'MME-RealWorld-Lite']

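    # Usage sketch (illustrative; constructing the dataset by name is assumed):
    # the name chosen here selects both the MD5 checksum above and the
    # per-language answer prompt in SYS.
    #
    #     dataset = MMERealWorld('MME-RealWorld-Lite')
    #     print(dataset.supported_datasets())
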
    def load_data(
        self, dataset="MME-RealWorld", repo_id="yifanzhang114/MME-RealWorld-Base64"
    ):

        def check_integrity(pth):
            data_file = osp.join(pth, f"{dataset}.tsv")

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.DATASET_MD5[dataset]:
                return False
            return True

        def generate_tsv(pth):
            tsv_file = os.path.join(pth, f"{dataset}.tsv")

            if os.path.exists(tsv_file):
                print(f"{tsv_file} already exists.")
                return

            json_dir = os.path.join(pth, dataset)
            json_files = [f for f in os.listdir(json_dir) if f.endswith(".json")]

            data_list = []
            for json_file in json_files:
                with open(os.path.join(json_dir, json_file), "r") as f:
                    data = json.load(f)
                    for item in tqdm(data):
                        choice_prompt = (
                            "The choices are listed below:\n"
                            if dataset in ["MME-RealWorld", "MME-RealWorld-Lite"]
                            else "选项如下所示:\n"
                        )
                        # Each option string carries a 4-character label prefix,
                        # which is stripped for the per-choice columns
                        data_list.append(
                            {
                                "index": item["index"],
                                "image": item["image"],
                                "question": item["question"],
                                "multi-choice options": choice_prompt
                                + "\n".join(item["multi-choice options"]),
                                "A": item["multi-choice options"][0][4:],
                                "B": item["multi-choice options"][1][4:],
                                "C": item["multi-choice options"][2][4:],
                                "D": item["multi-choice options"][3][4:],
                                "E": item["multi-choice options"][4][4:],
                                "answer": item["answer"],
                                "category": item["category"],
                                "l2-category": item["l2-category"],
                            }
                        )
            df = pd.DataFrame(data_list)
            df.to_csv(tsv_file, sep="\t", index=False)
            print(f"TSV file saved to {tsv_file}")

        # The Lite split ships as a ready-made TSV; download it directly
        if dataset == "MME-RealWorld-Lite":
            url = 'https://huggingface.co/datasets/yifanzhang114/MME-RealWorld-Base64/resolve/main/mme_realworld_lite.tsv'  # noqa: E501
            file_md5 = (
                self.DATASET_MD5[dataset] if dataset in self.DATASET_MD5 else None
            )
            datas = self.prepare_tsv(url, file_md5)
            choice_prompt = "The choices are listed below:\n"
            for index, item in datas.iterrows():
                # Options are stored as a stringified list in the TSV
                options = eval(item["multi-choice options"])
                datas.loc[index, "multi-choice options"] = choice_prompt + "\n".join(options)
                datas.loc[index, "A"] = options[0][4:]
                datas.loc[index, "B"] = options[1][4:]
                datas.loc[index, "C"] = options[2][4:]
                datas.loc[index, "D"] = options[3][4:]
                datas.loc[index, "E"] = options[4][4:]
            return datas

        # Check whether the dataset is cached locally and intact
        update_flag = False
        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
            print(f"Using cached dataset from {cache_path}")
        else:
            from huggingface_hub import snapshot_download

            # Download (or locate) the dataset and build the TSV from its JSON files
            dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset")
            generate_tsv(dataset_path)
            update_flag = True

        data_path = os.path.join(dataset_path, f"{dataset}.tsv")
        if file_size(data_path, "GB") > 1:
            local_path = data_path.replace(".tsv", "_local.tsv")
            if (
                not osp.exists(local_path)
                or os.environ.get("FORCE_LOCAL", None)
                or update_flag
            ):
                from vlmeval.tools import LOCALIZE

                LOCALIZE(data_path, local_path)
            data_path = local_path
        return load(data_path)

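    # Columns of one TSV row written by generate_tsv above (illustrative layout):
    #
    #     index | image (base64) | question | multi-choice options | A B C D E | answer | category | l2-category
    #
    # A mirror repo could be loaded via the repo_id argument (hypothetical value):
    #
    #     data = dataset.load_data('MME-RealWorld', repo_id='your-org/MME-RealWorld-Base64')
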
    def post_build(self, dataset):
        self.TYPE = 'MMERealWorld'

    # Given one data record, return the built prompt (a multi-modal message); can be overridden
    def build_prompt(self, line):
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']

        # Append the option block and the language-specific answer instruction
        choice_prompt = line['multi-choice options'] + '\n'
        question += ' ' + choice_prompt + self.SYS[self.dataset_name]

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=question))
        return msgs

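    # The returned message list interleaves image and text parts, e.g. (illustrative values):
    #
    #     [
    #         {'type': 'image', 'value': '/path/to/image.jpg'},
    #         {'type': 'text',  'value': 'Q ... The choices are listed below: ... The best answer is:'},
    #     ]
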
    # Returns a dictionary with the per-dimension rating
    def evaluate(self, eval_file, **judge_kwargs):
        from .utils.multiple_choice import extract_characters_regex, get_dimension_rating
        assert eval_file.endswith('.xlsx'), 'data file should be an xlsx file'
        FAIL_MSG = 'Failed to obtain answer via API.'
        tmp_file = eval_file.replace('.xlsx', '_tmp.pkl')
        tgt_file = eval_file.replace('.xlsx', '_rating.json')
        score_file = eval_file.replace('.xlsx', '_score.xlsx')

        if not osp.exists(score_file):

            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            cnt_rejected = 0
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data['index']:
                ans = data.loc[data['index'] == idx, 'answer'].values[0]
                pred = data.loc[data['index'] == idx, 'prediction'].values[0]

                extract_pred = extract_characters_regex(pred)
                if extract_pred == '':
                    cnt_rejected += 1
                    data.loc[data['index'] == idx, 'score'] = 0
                else:
                    data.loc[data['index'] == idx, 'score'] = int(extract_pred == ans)

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {cnt_rejected} questions. '
                f'Those questions will be counted as 0 score in ALL rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating

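# Letter-extraction sketch (illustrative; assumes extract_characters_regex pulls
# the option letter out of free-form output and returns '' when none is found):
#
#     extract_characters_regex('The best answer is: B')  # -> 'B'
#     extract_characters_regex('I am not sure.')         # -> '' (scored as 0)
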
class HRBenchDataset(ImageMCQDataset):

    DATASET_URL = {
        'HRBench4K': 'https://huggingface.co/datasets/DreamMr/HR-Bench/resolve/main/hr_bench_4k.tsv',
        'HRBench8K': 'https://huggingface.co/datasets/DreamMr/HR-Bench/resolve/main/hr_bench_8k.tsv',
    }

    DATASET_MD5 = {
        'HRBench4K': 'f6b041b03d49543494b8a56d2e35be65',
        'HRBench8K': '274c9c7f89329b804a4723178a00219c',
    }

    def evaluate(self, eval_file, **judge_kwargs):
        assert os.path.exists(eval_file), '{} does not exist!'.format(eval_file)
        from .utils.multiple_choice import mcq_vanilla_eval
        from .utils.hrbench import report_acc_hrbench
        nproc = judge_kwargs.pop('nproc', 4)

        suffix = eval_file.split('.')[-1]
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # Lower-case all column names except single-letter choice labels
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
        data_map = {x: y for x, y in zip(data['index'], data['question'])}
        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )

        # Return the cached accuracy report if it already exists
        score_file = eval_file.replace(f'.{suffix}', '_acc.csv')
        if osp.exists(score_file):
            acc = load(score_file)
            return acc

        data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)
        dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
        data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))

        acc = report_acc_hrbench(data)
        dump(acc, score_file)

        return acc

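# Example invocation (illustrative; constructing the dataset by name is assumed):
#
#     ds = HRBenchDataset('HRBench4K')
#     acc = ds.evaluate('model_HRBench4K.xlsx', model='chatgpt-0125', nproc=8)
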
class CustomMCQDataset(ImageMCQDataset):

    def load_data(self, dataset):
        data_path = osp.join(LMUDataRoot(), f'{dataset}.tsv')

        if file_size(data_path, 'GB') > 1:
            local_path = data_path.replace('.tsv', '_local.tsv')
            if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None):
                from ..tools import LOCALIZE
                LOCALIZE(data_path, local_path)
            data_path = local_path
        return load(data_path)

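# Custom-dataset sketch (illustrative; 'MyMCQ' is a hypothetical name): place a
# TSV under LMUDataRoot() with the usual ImageMCQDataset columns and load it by name.
#
#     $LMUData/MyMCQ.tsv  with columns: index, image (base64), question, A, B, C, D, answer
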
class NaturalBenchDataset(ImageMCQDataset):

    DATASET_URL = {
        'NaturalBenchDataset': (
            'https://huggingface.co/datasets/BaiqiL/'
            'NaturalBench/resolve/main/NaturalBenchDataset.tsv'
        ),
    }
    DATASET_MD5 = {
        'NaturalBenchDataset': 'dbe25b044bc35696426381e9ba4fe930',
    }

    def build_prompt(self, line):
        SUFFIX_FOR_VQA = {
            "yes_no": "Please answer Yes or No.",
            "multiple_choice": "Please output the letter corresponding to the correct option."
        }
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']
        # Append the answer-format instruction that matches the question type
        prompt = f'{question} {SUFFIX_FOR_VQA[line["type"]]}'
        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

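    # Resulting prompt for a yes/no record (illustrative question text):
    #
    #     'Is the flag upside down? Please answer Yes or No.'
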
    def evaluate(self, eval_file, **judge_kwargs):
        from .utils.naturalbench import extract_answer, get_scores

        data = load(eval_file)
        data = data.sort_values(by='index')
        predictions = [str(x) for x in data['prediction']]
        answers = [str(x) for x in data['answer']]
        indexs = [str(x) for x in data['index']]
        meta = self.data
        types = [str(x) for x in meta['type']]
        results = {}
        assert len(predictions) == len(answers) == len(indexs) == len(types) == (1900 * 4)
        # Each NaturalBench sample spans 4 consecutive rows: 2 questions x 2 images
        number_answered_samples = len(predictions) // 4
        for i in range(number_answered_samples):
            results[i] = {
                "q0_i0": extract_answer(predictions[i * 4], types[i * 4]),
                "q0_i1": extract_answer(predictions[i * 4 + 1], types[i * 4 + 1]),
                "q1_i0": extract_answer(predictions[i * 4 + 2], types[i * 4 + 2]),
                "q1_i1": extract_answer(predictions[i * 4 + 3], types[i * 4 + 3])
            }

        scores = get_scores(results)
        print(scores)
        score_file = 'NaturalBench_acc.csv'
        df = pd.DataFrame(list(scores.items()), columns=['Metric', 'Score'])
        dump(df, score_file)

        return scores
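# Row layout assumed by the evaluation above: for sample i, rows 4i..4i+3 hold
# (question 0, image 0), (question 0, image 1), (question 1, image 0) and
# (question 1, image 1), so get_scores can compute NaturalBench's paired metrics
# per sample.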