Tasks: Question Answering
Modalities: Text
Languages: English
Size: 10K - 100K
Tags: chained-qa
Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- babi_qa.py +71 -67
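
The babi_qa.py change below switches the loader from opening extracted files on disk (note the dropped "import os") to reading archive members directly with dl_manager.download plus dl_manager.iter_archive, the pattern rolled out across loaders in datasets 1.16.0 so that they can also be streamed. A minimal usage sketch, assuming the "type"/"task_no" config kwargs referenced in the script; the exact feature layout of "story" may differ:

from datasets import load_dataset

# Illustrative only: the config kwargs mirror self.config.type / self.config.task_no
# used in babi_qa.py below; adjust to the configs the dataset actually publishes.
ds = load_dataset("babi_qa", type="en", task_no="qa1", streaming=True)

example = next(iter(ds["train"]))    # first story, read straight from the archive
print(example["story"]["text"][:3])  # first few statements of that story
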
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: BabiQa
 annotations_creators:
 - machine-generated
 language_creators:
babi_qa.py
CHANGED
@@ -15,8 +15,6 @@
 """The bAbI tasks dataset."""
 
 
-import os
-
 import datasets
 
 
@@ -851,20 +849,22 @@ class BabiQa(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         my_urls = ZIP_URL
-
+        archive = dl_manager.download(my_urls)
         splits = [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": paths[self.config.type][self.config.task_no]["train"],
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": paths[self.config.type][self.config.task_no]["test"],
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
@@ -874,73 +874,77 @@ class BabiQa(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": paths[self.config.type][self.config.task_no]["valid"],
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
         return splits
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, files):
 
-
-
-
-
-
+        for path, f in files:
+            if path == filepath:
+                story = []
+                example_idx = 0
+                for idx, line in enumerate(f):
+                    line = line.decode("utf-8")
+                    if line.strip() == "":
+                        if story != []:
+                            yield example_idx, {"story": story}
+                            example_idx += 1
+                            story = []
+                    elif line.strip().split()[0] == "1":  # New story
+                        if story != []:  # Already some story, flush it out
+                            yield example_idx, {"story": story}
+                            example_idx += 1
+                            story = []
+                        line_no = line.split()[0]
+                        line_split = line[len(line_no) :].strip().split("\t")
+                        if len(line_split) > 1:
+                            story.append(
+                                {
+                                    "id": line_no,
+                                    "type": 1,  # question
+                                    "supporting_ids": line_split[-1].split(" "),
+                                    "text": line_split[0].strip(),
+                                    "answer": line_split[1].strip(),
+                                }
+                            )
+                        else:
+                            story.append(
+                                {
+                                    "id": line_no,
+                                    "type": 0,  # context
+                                    "supporting_ids": [],
+                                    "text": line_split[0].strip(),
+                                    "answer": "",
+                                }
+                            )
+                    else:
+                        line_no = line.split()[0]
+                        line_split = line[len(line_no) :].strip().split("\t")
+                        if len(line_split) > 1:
+                            story.append(
+                                {
+                                    "id": line_no,
+                                    "type": 1,  # question
+                                    "supporting_ids": line_split[-1].split(" "),
+                                    "text": line_split[0].strip(),
+                                    "answer": line_split[1].strip(),
+                                }
+                            )
+                        else:
+                            story.append(
+                                {
+                                    "id": line_no,
+                                    "type": 0,  # context
+                                    "supporting_ids": [],
+                                    "text": line_split[0].strip(),
+                                    "answer": "",
+                                }
+                            )
+                else:  # After last line
                     if story != []:
                         yield example_idx, {"story": story}
-
-                        story = []
-                elif line.strip().split()[0] == "1":  # New story
-                    if story != []:  # Already some story, flush it out
-                        yield example_idx, {"story": story}
-                        example_idx += 1
-                        story = []
-                    line_no = line.split()[0]
-                    line_split = line[len(line_no) :].strip().split("\t")
-                    if len(line_split) > 1:
-                        story.append(
-                            {
-                                "id": line_no,
-                                "type": 1,  # question
-                                "supporting_ids": line_split[-1].split(" "),
-                                "text": line_split[0].strip(),
-                                "answer": line_split[1].strip(),
-                            }
-                        )
-                    else:
-                        story.append(
-                            {
-                                "id": line_no,
-                                "type": 0,  # context
-                                "supporting_ids": [],
-                                "text": line_split[0].strip(),
-                                "answer": "",
-                            }
-                        )
-                else:
-                    line_no = line.split()[0]
-                    line_split = line[len(line_no) :].strip().split("\t")
-                    if len(line_split) > 1:
-                        story.append(
-                            {
-                                "id": line_no,
-                                "type": 1,  # question
-                                "supporting_ids": line_split[-1].split(" "),
-                                "text": line_split[0].strip(),
-                                "answer": line_split[1].strip(),
-                            }
-                        )
-                    else:
-                        story.append(
-                            {
-                                "id": line_no,
-                                "type": 0,  # context
-                                "supporting_ids": [],
-                                "text": line_split[0].strip(),
-                                "answer": "",
-                            }
-                        )
-            else:  # After last line
-                if story != []:
-                    yield example_idx, {"story": story}
+                break
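
For reference when reading _generate_examples: dl_manager.iter_archive yields (path, file object) pairs with each member opened in binary mode, which is why every line is first decoded with line.decode("utf-8"). Each bAbI file is a sequence of numbered lines; a question line carries a tab-separated answer and supporting-fact ids, and a line whose number restarts at 1 begins a new story. A small standalone sketch of the same per-line rule (the sample story is invented for illustration):

# Standalone sketch of the per-line parsing rule used in _generate_examples above.
# The three sample lines are made up; real bAbI files follow the same layout.
sample = (
    "1 Mary moved to the bathroom.\n"
    "2 John went to the hallway.\n"
    "3 Where is Mary?\tbathroom\t1\n"
)

story = []
for line in sample.splitlines():
    line_no = line.split()[0]                        # leading statement number
    rest = line[len(line_no):].strip().split("\t")   # text [, answer, supporting ids]
    if len(rest) > 1:                                # question line
        story.append({"id": line_no, "type": 1, "text": rest[0],
                      "answer": rest[1], "supporting_ids": rest[-1].split(" ")})
    else:                                            # plain context statement
        story.append({"id": line_no, "type": 0, "text": rest[0],
                      "answer": "", "supporting_ids": []})

print(story[-1]["answer"])  # -> bathroom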