Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: English
Size: 10K - 100K
Tags: question-generation
update
qg_squadshifts.py CHANGED (+12 -12)
@@ -8,22 +8,22 @@ _DESCRIPTION = """[SQuAD Shifts](https://modestyachts.github.io/squadshifts-webs
 _URL = 'https://huggingface.co/datasets/asahi417/qg_squadshift/raw/main/data/processed'
 _FILES = {
     str(datasets.Split.TEST): {
-        'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(
-        'nyt': [f'{_URL}/nyt.test{i:02d}.jsonl' for i in range(
-        'reddit': [f'{_URL}/reddit.test{i:02d}.jsonl' for i in range(
-        'amazon': [f'{_URL}/amazon.test{i:02d}.jsonl' for i in range(
+        'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(3)],
+        'nyt': [f'{_URL}/nyt.test{i:02d}.jsonl' for i in range(4)],
+        'reddit': [f'{_URL}/reddit.test{i:02d}.jsonl' for i in range(4)],
+        'amazon': [f'{_URL}/amazon.test{i:02d}.jsonl' for i in range(4)]
     },
     str(datasets.Split.TRAIN): {
-        'new_wiki': [f'{_URL}/new_wiki.train{i:02d}.jsonl' for i in range(
-        'nyt': [f'{_URL}/nyt.train{i:02d}.jsonl' for i in range(
-        'reddit': [f'{_URL}/reddit.train{i:02d}.jsonl' for i in range(
-        'amazon': [f'{_URL}/amazon.train{i:02d}.jsonl' for i in range(
+        'new_wiki': [f'{_URL}/new_wiki.train{i:02d}.jsonl' for i in range(2)],
+        'nyt': [f'{_URL}/nyt.train{i:02d}.jsonl' for i in range(3)],
+        'reddit': [f'{_URL}/reddit.train{i:02d}.jsonl' for i in range(3)],
+        'amazon': [f'{_URL}/amazon.train{i:02d}.jsonl' for i in range(3)]
     },
     str(datasets.Split.VALIDATION): {
-        'new_wiki': [f'{_URL}/new_wiki.validation{i:02d}.jsonl' for i in range(
-        'nyt': [f'{_URL}/nyt.validation{i:02d}.jsonl' for i in range(
-        'reddit': [f'{_URL}/reddit.validation{i:02d}.jsonl' for i in range(
-        'amazon': [f'{_URL}/amazon.validation{i:02d}.jsonl' for i in range(
+        'new_wiki': [f'{_URL}/new_wiki.validation{i:02d}.jsonl' for i in range(2)],
+        'nyt': [f'{_URL}/nyt.validation{i:02d}.jsonl' for i in range(3)],
+        'reddit': [f'{_URL}/reddit.validation{i:02d}.jsonl' for i in range(3)],
+        'amazon': [f'{_URL}/amazon.validation{i:02d}.jsonl' for i in range(3)]
     },
 }
 # _FILES = {
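A note on the keys: the dict is indexed by str(datasets.Split.TEST) and friends rather than by string literals. datasets.Split members stringify to their lowercase split names, which is why the keys line up with the .test/.train/.validation segments in the shard filenames. A quick check, assuming the datasets library is installed:

import datasets

# Split members stringify to their lowercase names.
print(str(datasets.Split.TEST))        # test
print(str(datasets.Split.TRAIN))       # train
print(str(datasets.Split.VALIDATION))  # validation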
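The {i:02d} format spec zero-pads the shard index to two digits, so each comprehension expands to consecutively numbered JSONL URLs. A minimal sketch of that expansion, using only the base URL and the test-split shard counts visible on the new side of the diff:

_URL = 'https://huggingface.co/datasets/asahi417/qg_squadshift/raw/main/data/processed'

# Shard counts for the test split, taken from the new side of the diff.
test_shards = {'new_wiki': 3, 'nyt': 4, 'reddit': 4, 'amazon': 4}

test_files = {
    domain: [f'{_URL}/{domain}.test{i:02d}.jsonl' for i in range(n)]
    for domain, n in test_shards.items()
}

print(test_files['new_wiki'])
# ['.../new_wiki.test00.jsonl', '.../new_wiki.test01.jsonl', '.../new_wiki.test02.jsonl']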
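Each shard is a JSON Lines file served from the repository's raw endpoint, so a shard can be inspected directly by fetching it and parsing one JSON object per line. A standard-library sketch; the diff does not show the record schema, so no field names are assumed:

import json
import urllib.request

# First test shard of the new_wiki domain, per the updated _FILES dict.
url = ('https://huggingface.co/datasets/asahi417/qg_squadshift'
       '/raw/main/data/processed/new_wiki.test00.jsonl')

with urllib.request.urlopen(url) as resp:
    body = resp.read().decode('utf-8')

# One JSON object per non-empty line.
records = [json.loads(line) for line in body.splitlines() if line.strip()]
print(f'{len(records)} records in new_wiki.test00.jsonl')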