Support 'domains' kwarg in loading
qa_srl2020.py  +37 -10
CHANGED
@@ -18,7 +18,7 @@
 import datasets
 from dataclasses import dataclass
 from pathlib import Path
-from typing import List, Tuple
+from typing import List, Tuple, Union, Set, Iterable
 import pandas as pd
 import json
 import gzip
@@ -88,10 +88,16 @@ _URLs = {
 
 SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
 
+SUPPOERTED_DOMAINS = {"wikinews", "wikipedia"}
+
 @dataclass
 class QASRL2020BuilderConfig(datasets.BuilderConfig):
     """ Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
     load_from: str = "jsonl" # "csv" or "jsonl"
+
+    domains: Union[str, Iterable[str]] = "all" # can provide also a subset of acceptable domains.
+    # Acceptable domains are {"wikipedia", "wikinews"} for dev and test (qasrl-2020)
+    # and {"wikipedia", "wikinews", "TQA"} for train (qasrl-2018)
 
 
 class QaSrl2020(datasets.GeneratorBasedBuilder):
@@ -168,6 +174,22 @@ class QaSrl2020(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
         """Returns SplitGenerators."""
         assert self.config.load_from in ("csv", "jsonl")
+
+        # Handle domain selection
+        domains: Set[str] = []
+        if self.config.domains == "all":
+            domains = SUPPOERTED_DOMAINS
+        elif isinstance(self.config.domains, str):
+            if self.config.domains in SUPPOERTED_DOMAINS:
+                domains = {self.config.domains}
+            else:
+                raise ValueError(f"Unrecognized domain '{self.config.domains}'; only {SUPPOERTED_DOMAINS} are supported")
+        else:
+            domains = set(self.config.domains) & SUPPOERTED_DOMAINS
+            if len(domains) == 0:
+                raise ValueError(f"Unrecognized domains '{self.config.domains}'; only {SUPPOERTED_DOMAINS} are supported")
+        self.config.domains = domains
+
 
         if self.config.load_from == "csv":
             # prepare wiktionary for verb inflections inside 'self.verb_inflections'
@@ -185,20 +207,20 @@ class QaSrl2020(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "qasrl_annotations_paths": [corpora["qasrl-annotations"]["
-
-                    "sentences_paths": [corpora["sentences"]["
-
+                    "qasrl_annotations_paths": [corpora["qasrl-annotations"][f"{domain}.dev"]
+                                                for domain in domains],
+                    "sentences_paths": [corpora["sentences"][f"{domain}.dev"]
+                                        for domain in domains],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "qasrl_annotations_paths": [corpora["qasrl-annotations"]["
-
-                    "sentences_paths": [corpora["sentences"]["
-
+                    "qasrl_annotations_paths": [corpora["qasrl-annotations"][f"{domain}.test"]
+                                                for domain in domains],
+                    "sentences_paths": [corpora["sentences"][f"{domain}.test"]
+                                        for domain in domains],
                 },
             ),
         ]
@@ -238,6 +260,11 @@ class QaSrl2020(datasets.GeneratorBasedBuilder):
             sent_obj = json.loads(line.strip())
             tokens = sent_obj['sentenceTokens']
             sentence = ' '.join(tokens)
+            sent_id = sent_obj['sentenceId']
+            # consider only selected domains
+            sent_domain = sent_id.split(":")[1]
+            if sent_domain not in self.config.domains:
+                continue
             for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
                 verb_forms = verb_obj['verbInflectedForms']
                 predicate = tokens[int(predicate_idx)]
@@ -267,7 +294,7 @@ class QaSrl2020(datasets.GeneratorBasedBuilder):
 
             yield qa_counter, {
                 "sentence": sentence,
-                "sent_id":
+                "sent_id": sent_id,
                 "predicate_idx": predicate_idx,
                 "predicate": predicate,
                 "is_verbal": True,
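A minimal usage sketch of the new kwarg, assuming the loader script is invoked through datasets.load_dataset, which forwards extra keyword arguments to QASRL2020BuilderConfig; the script path below is illustrative, not part of this change:

import datasets

# Default: all supported domains ("wikinews" and "wikipedia")
qasrl_all = datasets.load_dataset("qa_srl2020.py")

# A single domain can be passed as a string ...
qasrl_news = datasets.load_dataset("qa_srl2020.py", domains="wikinews")

# ... or a subset of domains as an iterable (the intersection with the
# supported set is used; a ValueError is raised if nothing remains).
qasrl_wiki = datasets.load_dataset("qa_srl2020.py", domains=["wikipedia"])

Sentences from unselected domains are skipped at generation time by checking the domain component of 'sentenceId' against the resolved domain set.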