"""HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering.""" |
|
|
|
|
|
import json |
|
import textwrap |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """
@inproceedings{xanh2020_2wikimultihop,
    title = "Constructing A Multi-hop {QA} Dataset for Comprehensive Evaluation of Reasoning Steps",
    author = "Ho, Xanh and
      Duong Nguyen, Anh-Khoa and
      Sugawara, Saku and
      Aizawa, Akiko",
    booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
    month = dec,
    year = "2020",
    address = "Barcelona, Spain (Online)",
    publisher = "International Committee on Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.coling-main.580",
    pages = "6609--6625",
}
"""

_DESCRIPTION = """\
2WikiMultiHopQA is a multi-hop question answering dataset built from Wikipedia and
Wikidata. Each question is annotated with its answer, sentence-level supporting
facts, and evidence triples describing the reasoning path, enabling a comprehensive
evaluation of the reasoning steps behind each answer.
"""

_URL_BASE = "data"
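
# Note: `_URL_BASE` is a relative path. The train/dev/test JSON files are assumed
# to sit in a `data/` directory next to this script, and `dl_manager.download` is
# expected to resolve such relative paths against the script's location (an
# assumption based on how the paths are constructed below).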
|
|
|
|
|
class TwowikimultihopQA(datasets.GeneratorBasedBuilder):
    """2WikiMultiHopQA: a multi-hop QA dataset for comprehensive evaluation of reasoning steps."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    # Sentence-level supporting facts: the article title and the
                    # index of the supporting sentence within that article.
                    "supporting_facts": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sent_id": datasets.Value("int32"),
                        }
                    ),
                    # Context paragraphs: each entry pairs an article title with
                    # the list of its sentences.
                    "context": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sentences": datasets.features.Sequence(datasets.Value("string")),
                        }
                    ),
                    # Evidence triples describing the reasoning path.
                    "evidences": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                    "entity_ids": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/Alab-NII/2wikimultihop",
            citation=_CITATION,
        )
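
    # For reference, a raw record in the split files looks roughly like this
    # (a sketch inferred from how `_generate_examples` reads the fields below,
    # not a verbatim sample):
    #
    #     {
    #         "_id": "...",
    #         "question": "...",
    #         "answer": "...",
    #         "type": "...",
    #         "supporting_facts": [["Article Title", 0], ...],
    #         "context": [["Article Title", ["Sentence 0.", "Sentence 1."]], ...],
    #         "evidences": [["subject", "relation", "object"], ...],
    #         "entity_ids": "..."
    #     }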
|
|
|
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators for the train/dev/test JSON files."""
        paths = {
            datasets.Split.TRAIN: f"{_URL_BASE}/train.json",
            datasets.Split.VALIDATION: f"{_URL_BASE}/dev.json",
            datasets.Split.TEST: f"{_URL_BASE}/test.json",
        }

        files = dl_manager.download(paths)

        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"data_file": files[split]})
            for split in files
        ]

    def _generate_examples(self, data_file):
        """Yields examples from one JSON split file."""
        with open(data_file, encoding="utf-8") as f:
            data = json.load(f)

        for idx, example in enumerate(data):
            # Some records (e.g. in the unlabeled test split) may lack labels;
            # default them so the yield below never raises a KeyError.
            for k in ["answer", "type"]:
                if k not in example:
                    example[k] = None

            if "supporting_facts" not in example:
                example["supporting_facts"] = []

            yield idx, {
                "id": example["_id"],
                "question": example["question"],
                "answer": example["answer"],
                "type": example["type"],
                # Raw supporting facts and context entries are [title, value]
                # pairs; map them onto the dict features declared in _info.
                "supporting_facts": [{"title": f[0], "sent_id": f[1]} for f in example["supporting_facts"]],
                "context": [{"title": f[0], "sentences": f[1]} for f in example["context"]],
                "evidences": example["evidences"],
                "entity_ids": example["entity_ids"],
            }
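

# A minimal usage sketch (an assumption, not part of the original loader): running
# this file directly builds all three splits through the `datasets` library and
# prints the first training example.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)
    print(dataset)
    print(dataset["train"][0])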
|
|