yuvalkirstain committed on
Commit
76afc16
·
1 Parent(s): f16de40

update with some datasets

Browse files
Files changed (1) hide show
  1. mrqa.py +237 -0
mrqa.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """MRQA 2019 Shared task dataset."""
16
+
17
+ import json
18
+
19
+ import datasets
20
+
21
# BibTeX entry for the MRQA 2019 shared-task paper (Fisch et al., EMNLP 2019).
_CITATION = """\
@inproceedings{fisch2019mrqa,
    title={{MRQA} 2019 Shared Task: Evaluating Generalization in Reading Comprehension},
    author={Adam Fisch and Alon Talmor and Robin Jia and Minjoon Seo and Eunsol Choi and Danqi Chen},
    booktitle={Proceedings of 2nd Machine Reading for Reading Comprehension (MRQA) Workshop at EMNLP},
    year={2019},
}
"""

# Shown as the dataset card summary by the `datasets` library.
_DESCRIPTION = """\
The MRQA 2019 Shared Task focuses on generalization in question answering.
An effective question answering system should do more than merely
interpolate from the training set to answer test examples drawn
from the same distribution: it should also be able to extrapolate
to out-of-distribution examples — a significantly harder challenge.
The dataset is a collection of 18 existing QA dataset (carefully selected
subset of them) and converted to the same format (SQuAD format). Among
these 18 datasets, six datasets were made available for training,
six datasets were made available for development, and the final six
for testing. The dataset is released as part of the MRQA 2019 Shared Task.
"""
42
+
43
+ _HOMEPAGE = "https://mrqa.github.io/2019/shared.html"
44
+
45
+ _LICENSE = "Unknwon"
46
+
47
+ _URLs = {
48
+ # Train sub-datasets
49
+ "train+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SQuAD.jsonl.gz",
50
+ "train+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NewsQA.jsonl.gz",
51
+ "train+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/TriviaQA-web.jsonl.gz",
52
+ "train+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SearchQA.jsonl.gz",
53
+ "train+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/HotpotQA.jsonl.gz",
54
+ "train+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NaturalQuestionsShort.jsonl.gz",
55
+ # Validation sub-datasets
56
+ "validation+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SQuAD.jsonl.gz",
57
+ "validation+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NewsQA.jsonl.gz",
58
+ "validation+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TriviaQA-web.jsonl.gz",
59
+ "validation+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SearchQA.jsonl.gz",
60
+ "validation+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/HotpotQA.jsonl.gz",
61
+ "validation+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NaturalQuestionsShort.jsonl.gz",
62
+ # Test sub-datasets
63
+ "test+BioASQ": "http://participants-area.bioasq.org/MRQA2019/", # BioASQ.jsonl.gz
64
+ "test+DROP": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DROP.jsonl.gz",
65
+ "test+DuoRC": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DuoRC.ParaphraseRC.jsonl.gz",
66
+ "test+RACE": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RACE.jsonl.gz",
67
+ "test+RelationExtraction": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RelationExtraction.jsonl.gz",
68
+ "test+TextbookQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TextbookQA.jsonl.gz",
69
+ }
70
+
71
+
72
class MRQAConfig(datasets.BuilderConfig):
    """BuilderConfig for one MRQA 2019 sub-dataset."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for one MRQA 2019 sub-dataset.

        The original docstring documented parameters copied from a template
        (`additional_features`, `citation`, `url`, `label_classes`) that this
        config never accepted; it is rewritten to match the real signature.

        Args:
            data_url: `dict[str, str]`, mapping a split name ("train",
                "validation", "test") to the URL of that split's gzipped
                JSONL file.
            **kwargs: keyword arguments forwarded to super (e.g. `name`).
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
90
+
91
+
92
class MRQA(datasets.GeneratorBasedBuilder):
    """MRQA 2019 Shared task dataset (one builder config per sub-dataset)."""

    VERSION = datasets.Version("1.1.0")

    # NOTE(review): the shared task's hidden test answers were never released,
    # so each config reuses the public dev file as its "test" split.
    BUILDER_CONFIGS = [
        MRQAConfig(
            name="newsqa",
            data_url={
                "validation": _URLs["validation+NewsQA"],
                "train": _URLs["train+NewsQA"],
                "test": _URLs["validation+NewsQA"],
            },
        ),
        MRQAConfig(
            name="natural_questions",
            data_url={
                "validation": _URLs["validation+NaturalQuestions"],
                "train": _URLs["train+NaturalQuestions"],
                "test": _URLs["validation+NaturalQuestions"],
            },
        ),
        MRQAConfig(
            name="hotpotqa",
            data_url={
                "validation": _URLs["validation+HotpotQA"],
                "train": _URLs["train+HotpotQA"],
                "test": _URLs["validation+HotpotQA"],
            },
        ),
    ]

    def _info(self):
        """Declare the feature schema shared by every MRQA sub-dataset.

        Format is derived from
        https://github.com/mrqa/MRQA-Shared-Task-2019#mrqa-format
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "subset": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    # Whitespace-tokenized context: parallel lists of token
                    # strings and their character offsets.
                    "context_tokens": datasets.Sequence(
                        {
                            "tokens": datasets.Value("string"),
                            "offsets": datasets.Value("int32"),
                        }
                    ),
                    "qid": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "question_tokens": datasets.Sequence(
                        {
                            "tokens": datasets.Value("string"),
                            "offsets": datasets.Value("int32"),
                        }
                    ),
                    # Answer occurrences located in the context, as both
                    # character-level and token-level [start, end] spans.
                    "detected_answers": datasets.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "char_spans": datasets.Sequence(
                                {
                                    "start": datasets.Value("int32"),
                                    "end": datasets.Value("int32"),
                                }
                            ),
                            "token_spans": datasets.Sequence(
                                {
                                    "start": datasets.Value("int32"),
                                    "end": datasets.Value("int32"),
                                }
                            ),
                        }
                    ),
                    "answers": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download each split's file and return one generator per split."""
        # self.config.data_url is {split_name: url}; download_and_extract
        # returns a dict with the same keys mapping to local file paths.
        data_dir = dl_manager.download_and_extract(self.config.data_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths_dict": data_dir, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths_dict": data_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths_dict": data_dir, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepaths_dict, split):
        """Yields (key, example) pairs for the requested split.

        Args:
            filepaths_dict: dict mapping a split name to a local JSONL path.
            split: which split to generate ("train"/"validation"/"test").
        """
        for source, filepath in filepaths_dict.items():
            # BUG FIX: the original used `source not in split`, which is a
            # substring test; exact equality is what is intended here.
            if source != split:
                continue
            with open(filepath, encoding="utf-8") as f:
                # The first line of each MRQA file is a header object naming
                # the sub-dataset, e.g. {"header": {"dataset": "HotpotQA"}}.
                header = next(f)
                subset = json.loads(header)["header"]["dataset"]

                for row in f:
                    paragraph = json.loads(row)
                    # NOTE(review): stripping the context shifts character
                    # positions relative to `char_spans` if the raw context has
                    # leading whitespace — kept as-is to preserve behavior.
                    context = paragraph["context"].strip()
                    context_tokens = [{"tokens": t[0], "offsets": t[1]} for t in paragraph["context_tokens"]]
                    for qa in paragraph["qas"]:
                        qid = qa["qid"]
                        question = qa["question"].strip()
                        question_tokens = [{"tokens": t[0], "offsets": t[1]} for t in qa["question_tokens"]]
                        detected_answers = [
                            {
                                "text": detect_ans["text"].strip(),
                                "char_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["char_spans"]],
                                "token_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["token_spans"]],
                            }
                            for detect_ans in qa["detected_answers"]
                        ]
                        # Keys are unique per source file + question id.
                        yield f"{source}_{qid}", {
                            "subset": subset,
                            "context": context,
                            "context_tokens": context_tokens,
                            "qid": qid,
                            "question": question,
                            "question_tokens": question_tokens,
                            "detected_answers": detected_answers,
                            "answers": qa["answers"],
                        }
232
+
233
+
234
if __name__ == "__main__":
    # Smoke test: load one config of this script. The original hard-coded a
    # personal absolute path ("/Users/yuvalkirstain/repos/mrqa") and left a
    # dead debug statement (`x = 5`); use the script's own directory instead.
    import os

    from datasets import load_dataset

    ssfd_debug = load_dataset(os.path.dirname(os.path.abspath(__file__)), name="hotpotqa")
    print(ssfd_debug)