holylovenia committed on
Commit
d66ed6a
·
verified ·
1 Parent(s): 63a8a0f

Upload mabl.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. mabl.py +227 -0
mabl.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import csv
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks
11
+
12
# BibTeX citation for the MABL paper (Kabra et al., Findings of ACL 2023).
_CITATION = r"""
@inproceedings{kabra-etal-2023-multi,
    title = "Multi-lingual and Multi-cultural Figurative Language Understanding",
    author = "Kabra, Anubha and
      Liu, Emmy and
      Khanuja, Simran and
      Aji, Alham Fikri and
      Winata, Genta and
      Cahyawijaya, Samuel and
      Aremu, Anuoluwapo and
      Ogayo, Perez and
      Neubig, Graham",
    editor = "Rogers, Anna and
      Boyd-Graber, Jordan and
      Okazaki, Naoaki",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-acl.525",
    doi = "10.18653/v1/2023.findings-acl.525",
    pages = "8269--8284",
}
"""

# Data is fetched remotely (not a local-only dataset).
_LOCAL = False
# ISO 639-3 codes of the covered SEA languages: Indonesian, Javanese, Sundanese.
_LANGUAGES = ["ind", "jav", "sun"]
_DATASETNAME = "mabl"
_DESCRIPTION = r"""\
The MABL (Metaphors Across Borders and Languages) dataset is a collection of
6,366 figurative language expressions from seven languages, crafted to improve
multilingual models' understanding of figurative speech and its linguistic
variations. It was built by crowdsourcing native speakers to generate paired
metaphors that began with the same words but had different meanings, as well as
the literal interpretations of both phrases. Each expression was checked by
fluent speakers to ensure they were clear, appropriate, and followed the format,
discarding any that didn't meet these standards.
"""

_HOMEPAGE = "https://github.com/simran-khanuja/Multilingual-Fig-QA"
_LICENSE = Licenses.MIT.value
# Base URL of the per-language CSV files; a 2-letter ISO code + ".csv" is appended.
_URL = "https://raw.githubusercontent.com/simran-khanuja/Multilingual-Fig-QA/main/langdata/"

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
59
+
60
+
61
def iso3to2(lang: str) -> str:
    """Map an ISO 639-3 language code to its 2-letter ISO 639-1 form.

    Only the three MABL languages are supported ("ind", "jav", "sun");
    any other code raises KeyError.
    """
    return {"ind": "id", "jav": "jv", "sun": "su"}[lang]
65
+
66
+
67
class MABLDataset(datasets.GeneratorBasedBuilder):
    """MABL dataset by Liu et al (2023).

    Loads the Indonesian, Javanese, and Sundanese subsets of MABL, a
    figurative-language understanding dataset. Each example pairs a
    metaphorical startphrase with two candidate interpretations
    (ending1/ending2) and a label selecting the correct one.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "qa"

    # One source config + one seacrowd config per language (e.g. mabl_ind_source).
    dataset_names = sorted([f"{_DATASETNAME}_{lang}" for lang in _LANGUAGES])
    BUILDER_CONFIGS = []
    for name in dataset_names:
        source_config = SEACrowdConfig(
            name=f"{name}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=name,
        )
        BUILDER_CONFIGS.append(source_config)
        seacrowd_config = SEACrowdConfig(
            name=f"{name}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=name,
        )
        BUILDER_CONFIGS.append(seacrowd_config)

    # Add configurations that allow loading all languages at once.
    BUILDER_CONFIGS.extend(
        [
            # mabl_source
            SEACrowdConfig(
                name=f"{_DATASETNAME}_source",
                version=SOURCE_VERSION,
                description=f"{_DATASETNAME} source schema (all)",
                schema="source",
                subset_id=_DATASETNAME,
            ),
            # mabl_seacrowd_qa
            SEACrowdConfig(
                name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
                version=SEACROWD_VERSION,
                description=f"{_DATASETNAME} SEACrowd schema (all)",
                schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
                subset_id=_DATASETNAME,
            ),
        ]
    )

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata (features, homepage, license, citation)
        for the schema selected by the active config.

        Raises:
            ValueError: if the config carries an unrecognized schema name.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "startphrase": datasets.Value("string"),
                    "ending1": datasets.Value("string"),
                    "ending2": datasets.Value("string"),
                    "labels": datasets.Value("string"),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.qa_features
        else:
            # FIX: the original fell through with `features` unbound (NameError);
            # fail with an explicit message instead.
            raise ValueError(f"Unexpected schema: {self.config.schema}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        """Return SplitGenerators: a single TEST split.

        The MABL paper treats each language subset as a test set due to its
        small size, so no train/validation splits are produced.
        """
        mabl_source_data = []
        languages = []

        lang = self.config.name.split("_")[1]
        if lang in _LANGUAGES:
            # Per-language config, e.g. mabl_ind_source: load one language.
            mabl_source_data.append(dl_manager.download_and_extract(_URL + f"{iso3to2(lang)}.csv"))
            languages.append(lang)
        else:
            # mabl_source / mabl_seacrowd_qa: load every language at once.
            for lang in _LANGUAGES:
                mabl_source_data.append(dl_manager.download_and_extract(_URL + f"{iso3to2(lang)}.csv"))
                languages.append(lang)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": mabl_source_data,
                    "split": "test",
                    "languages": languages,
                },
            )
        ]

    def _generate_examples(self, filepaths: List[Path], split: str, languages: List[str]) -> Iterator[Tuple[int, Dict]]:
        """Yield (key, example) tuples in the schema selected by the config.

        Args:
            filepaths: downloaded CSV paths, aligned index-for-index with
                ``languages``.
            split: split name (unused; only "test" is ever produced).
            languages: ISO 639-3 codes corresponding to each filepath.

        Raises:
            ValueError: on an unsupported language code or schema name.
        """
        startphrases = []
        endings1 = []
        endings2 = []
        labels = []

        for lang, filepath in zip(languages, filepaths):
            with open(filepath, encoding="utf-8") as f:
                csv_reader = csv.reader(f, delimiter=",")
                next(csv_reader, None)  # skip the header row
                for row in csv_reader:
                    # The column order is inconsistent across the MABL
                    # subfiles: 'ind' stores [ending1, ending2, labels,
                    # startphrase], while 'jav' and 'sun' swap the last two
                    # columns, so the order is hard-coded per language.
                    if lang == "ind":
                        end1, end2, label, start = row
                    elif lang in ("jav", "sun"):
                        end1, end2, start, label = row
                    else:
                        # FIX: the original used two independent `if`s, so an
                        # unknown language silently reused stale values from a
                        # previous row (or raised NameError on the first row).
                        raise ValueError(f"Unsupported language: {lang}")

                    startphrases.append(start)
                    endings1.append(end1)
                    endings2.append(end2)
                    labels.append(label)

        for idx, (start, end1, end2, label) in enumerate(zip(startphrases, endings1, endings2, labels)):
            if self.config.schema == "source":
                example = {
                    "id": str(idx),
                    "startphrase": start,
                    "ending1": end1,
                    "ending2": end2,
                    "labels": label,
                }
            elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
                # Create QA-specific items: the label is the 0-based index of
                # the correct ending.
                choices = [end1, end2]
                answer = choices[int(label)]

                # MABL doesn't differentiate between question and context.
                # It only contains a startphrase, so the startphrase goes in
                # `question` and `context` is left blank.
                example = {
                    "id": str(idx),
                    "question_id": idx,
                    "document_id": idx,
                    "question": start,
                    "type": "multiple_choice",
                    "choices": choices,
                    "context": "",
                    "answer": [answer],
                    "meta": {},
                }
            else:
                # FIX: the original would raise NameError on `example` here.
                raise ValueError(f"Unexpected schema: {self.config.schema}")

            yield idx, example