Dataset: lmqg /
Modalities: Text · Languages: English · Libraries: Datasets

Commit f8e6d3e · 1 Parent(s): 41dd509 · committed by asahi417
Files changed (43)
  1. .gitattributes +12 -0
  2. data/processed/amazon.test.jsonl +3 -0
  3. data/processed/amazon.test00.jsonl +0 -0
  4. data/processed/amazon.test01.jsonl +0 -0
  5. data/processed/amazon.test02.jsonl +0 -0
  6. data/processed/amazon.test03.jsonl +0 -0
  7. data/processed/amazon.test04.jsonl +0 -0
  8. data/processed/amazon.test05.jsonl +0 -0
  9. data/processed/amazon.test06.jsonl +0 -0
  10. data/processed/amazon.train.jsonl +3 -0
  11. data/processed/amazon.validation.jsonl +3 -0
  12. data/processed/new_wiki.test.jsonl +3 -0
  13. data/processed/new_wiki.test00.jsonl +0 -0
  14. data/processed/new_wiki.test01.jsonl +0 -0
  15. data/processed/new_wiki.test02.jsonl +0 -0
  16. data/processed/new_wiki.test03.jsonl +0 -0
  17. data/processed/new_wiki.test04.jsonl +0 -0
  18. data/processed/new_wiki.test05.jsonl +0 -0
  19. data/processed/new_wiki.train.jsonl +3 -0
  20. data/processed/new_wiki.validation.jsonl +3 -0
  21. data/processed/nyt.test.jsonl +3 -0
  22. data/processed/nyt.test00.jsonl +0 -0
  23. data/processed/nyt.test01.jsonl +0 -0
  24. data/processed/nyt.test02.jsonl +0 -0
  25. data/processed/nyt.test03.jsonl +0 -0
  26. data/processed/nyt.test04.jsonl +0 -0
  27. data/processed/nyt.test05.jsonl +0 -0
  28. data/processed/nyt.test06.jsonl +0 -0
  29. data/processed/nyt.train.jsonl +3 -0
  30. data/processed/nyt.validation.jsonl +3 -0
  31. data/processed/reddit.test.jsonl +3 -0
  32. data/processed/reddit.test00.jsonl +0 -0
  33. data/processed/reddit.test01.jsonl +0 -0
  34. data/processed/reddit.test02.jsonl +0 -0
  35. data/processed/reddit.test03.jsonl +0 -0
  36. data/processed/reddit.test04.jsonl +0 -0
  37. data/processed/reddit.test05.jsonl +0 -0
  38. data/processed/reddit.test06.jsonl +0 -0
  39. data/processed/reddit.train.jsonl +3 -0
  40. data/processed/reddit.validation.jsonl +3 -0
  41. generate_reference_files.py +1 -2
  42. process.py +47 -18
  43. qg_squadshifts.py +38 -7
.gitattributes CHANGED
@@ -35,3 +35,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ data/processed/nyt.train.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/reddit.test.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/reddit.train.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/amazon.test.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/amazon.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/new_wiki.test.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/new_wiki.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/reddit.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/amazon.train.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/new_wiki.train.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/nyt.test.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/nyt.validation.jsonl filter=lfs diff=lfs merge=lfs -text
data/processed/amazon.test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:453543a602bf84dba127ed8f40a8f8e28fd7485d4ad2f751a2530aa86331ab41
+ size 13964060
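
Each `.jsonl` file added in this commit is stored through Git LFS, so the committed blob is just a three-line pointer (spec version, sha256 oid, byte size) like the one above; the real data is pulled on checkout when git-lfs is installed. A minimal sketch for reading such a pointer, assuming only the pointer format shown here (the helper itself is not part of this repository):

```python
def parse_lfs_pointer(path):
    """Read a Git LFS pointer file and return its sha256 digest and blob size."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # Pointer files always start with the LFS spec version line.
    assert fields.get("version", "").startswith("https://git-lfs.github.com/spec/")
    digest = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    size = int(fields["size"])                # size of the real file in bytes
    return digest, size

# For the data/processed/amazon.test.jsonl pointer shown above this should
# report the oid 453543a6... and a size of 13964060 bytes.
digest, size = parse_lfs_pointer("data/processed/amazon.test.jsonl")
print(digest, size)
```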
data/processed/amazon.test00.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/amazon.test01.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/amazon.test02.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/amazon.test03.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/amazon.test04.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/amazon.test05.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/amazon.test06.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/amazon.train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2c6a144dfe7e3c2d2627eb7bd20234c18d9bcd087bce8f77b3d63225f00ef99
+ size 9249730
data/processed/amazon.validation.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2c6a144dfe7e3c2d2627eb7bd20234c18d9bcd087bce8f77b3d63225f00ef99
+ size 9249730
data/processed/new_wiki.test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a5fb4ccd7044aca54e5b55f27f566dd87ea3ad92ab28dbd0297296dc08497ed
+ size 11881002
data/processed/new_wiki.test00.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/new_wiki.test01.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/new_wiki.test02.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/new_wiki.test03.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/new_wiki.test04.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/new_wiki.test05.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/new_wiki.train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4d67b011718268a35cfa136b224cc3b6ce2f58af509ea9c635e219b2326886f
+ size 7979291
data/processed/new_wiki.validation.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4d67b011718268a35cfa136b224cc3b6ce2f58af509ea9c635e219b2326886f
+ size 7979291
data/processed/nyt.test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1097a407661f94281d244b31e96b3bc8a70a54d8dd042d8028a9b6b00dcc17b0
+ size 15441744
data/processed/nyt.test00.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/nyt.test01.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/nyt.test02.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/nyt.test03.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/nyt.test04.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/nyt.test05.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/nyt.test06.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/nyt.train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccec3f7634924d008862f4004f33ec05c1a62bc676da6e6da0d241b98bcfe92c
+ size 10254035
data/processed/nyt.validation.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccec3f7634924d008862f4004f33ec05c1a62bc676da6e6da0d241b98bcfe92c
+ size 10254035
data/processed/reddit.test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6efde6314b7f0310f620cde567756d28a7b954b9e31420db5c379d436d97cd7f
+ size 13942662
data/processed/reddit.test00.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/reddit.test01.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/reddit.test02.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/reddit.test03.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/reddit.test04.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/reddit.test05.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/reddit.test06.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/processed/reddit.train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61ee48ccf1a17e4c5e9b9385afc6bf045bc0aae1f73df64848a8b1d8924269de
+ size 9218046
data/processed/reddit.validation.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61ee48ccf1a17e4c5e9b9385afc6bf045bc0aae1f73df64848a8b1d8924269de
+ size 9218046
generate_reference_files.py CHANGED
@@ -6,12 +6,11 @@ dataset_name = 'asahi417/qg_squadshifts'
  os.makedirs('./reference_files', exist_ok=True)
 
 
- for split in ['test']:
+ for split in ['test', 'validation']:
      for domain in ["default", 'new_wiki', 'nyt', 'reddit', 'amazon']:
          dataset = load_dataset(dataset_name, domain, split=split)
          for data in ['question', 'answer', 'sentence', 'paragraph']:
              with open('./reference_files/{}-{}.{}txt'.format(data, split, "" if domain == 'default' else f"{domain}."), 'w') as f:
-
                  if data == 'paragraph':
                      tmp_data = dataset['paragraph_id']
                  else:
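
With 'validation' added to the split loop, reference files are now written for both splits; the format string yields names such as question-test.txt for the default config and question-validation.new_wiki.txt for a domain config. A standalone sketch that only prints the names the loop would create (no dataset download, mirrors the format string above):

```python
# Sketch: enumerate the reference-file names produced by the loop above.
for split in ['test', 'validation']:
    for domain in ["default", 'new_wiki', 'nyt', 'reddit', 'amazon']:
        for data in ['question', 'answer', 'sentence', 'paragraph']:
            name = './reference_files/{}-{}.{}txt'.format(
                data, split, "" if domain == 'default' else f"{domain}.")
            print(name)  # e.g. ./reference_files/answer-validation.reddit.txt
```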
process.py CHANGED
@@ -13,14 +13,16 @@ rm -rf amazon.test.jsonl
  import json
  import os
  import re
- import spacy
-
+ from random import shuffle, seed
  from tqdm import tqdm
+
+ import spacy
  from datasets import load_dataset
 
  DATASET_NAME = "squadshifts"
  DATASET_TYPES = ['new_wiki', 'nyt', 'reddit', 'amazon']
  HIGHLIGHT_TOKEN = '<hl>'
+ GENERATE_TEST_SPLIT = True
  SPLITTER = spacy.load('en_core_web_sm')
 
 
@@ -88,19 +90,46 @@ if __name__ == '__main__':
      os.makedirs(output, exist_ok=True)
      for data_type in DATASET_TYPES:
          dataset = load_dataset(DATASET_NAME, data_type)
-         for _split in dataset.keys():
-             tmp_dataset = dataset[_split]
-             with open(f'{output}/{data_type}.{_split}.jsonl', 'w') as f:
-                 for single_data in tqdm(tmp_dataset):
-                     question_str = single_data['question']  #.replace("\n", ".").replace('"', "'")
-                     paragraph_str = single_data['context']  #.replace("\n", ".").replace('"', "'")
-                     answer_str = single_data['answers']['text']
-                     if type(answer_str) == list:
-                         answer_str = answer_str[0]
-                     # answer_str = answer_str.replace("\n", ".").replace('"', "'")
-                     assert type(answer_str) is str, answer_str
-                     assert type(question_str) is str, question_str
-                     assert type(paragraph_str) is str, paragraph_str
-                     tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
-                     tmp_data['paragraph_id'] = single_data['id']
-                     f.write(json.dumps(tmp_data) + '\n')
+         _split = 'test'
+         tmp_dataset = dataset[_split]
+         full_data = []
+         for single_data in tqdm(tmp_dataset):
+             question_str = single_data['question']  #.replace("\n", ".").replace('"', "'")
+             paragraph_str = single_data['context']  #.replace("\n", ".").replace('"', "'")
+             answer_str = single_data['answers']['text']
+             if type(answer_str) == list:
+                 answer_str = answer_str[0]
+             assert type(answer_str) is str, answer_str
+             assert type(question_str) is str, question_str
+             assert type(paragraph_str) is str, paragraph_str
+             tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
+             tmp_data['paragraph_id'] = single_data['id']
+             full_data.append(tmp_data)
+
+         # split test into train/valid/test
+         test_size = int(len(full_data)/2)
+         train_size = int((len(full_data) - test_size) * 2/3)
+         valid_size = len(full_data) - train_size - test_size
+         assert train_size + test_size + valid_size == len(full_data), f"{train_size}, {test_size}, {valid_size}"
+         paragraph_ids = list(set([i['paragraph_id'] for i in full_data]))
+         data_dict = {p: [i for i in full_data if i['paragraph_id'] == p] for p in paragraph_ids}
+         seed(0)
+         shuffle(paragraph_ids)
+         lines_train = []
+         lines_test = []
+         lines_valid = []
+
+         for i in paragraph_ids:
+             if len(lines_test) < test_size:
+                 lines_test += data_dict[i]
+             elif len(lines_train) < train_size:
+                 lines_train += data_dict[i]
+             else:
+                 lines_valid += data_dict[i]
+         print(f'STATS(train/valid/test): {data_type}| {len(lines_train)}/{len(lines_valid)}/{len(lines_test)}')
+         with open(f'{output}/{data_type}.test.jsonl', 'w') as f:
+             f.write('\n'.join([json.dumps(i) for i in lines_test]))
+         with open(f'{output}/{data_type}.train.jsonl', 'w') as f:
+             f.write('\n'.join([json.dumps(i) for i in lines_train]))
+         with open(f'{output}/{data_type}.validation.jsonl', 'w') as f:
+             f.write('\n'.join([json.dumps(i) for i in lines_train]))
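
The new __main__ block keeps only the original SQuAD Shifts test split and re-partitions it at paragraph level, so no paragraph appears in more than one split: roughly half the examples become the new test set, two thirds of the remainder become train, and the rest validation (whole paragraphs are assigned at once, so the final counts only approximate these targets). A minimal standalone sketch of the size arithmetic, with a hypothetical example count:

```python
# Illustrative split-size arithmetic, mirroring the hunk above (n is hypothetical).
n = 9000
test_size = int(n / 2)                       # 4500 examples reserved for test
train_size = int((n - test_size) * 2 / 3)    # 3000 examples for train
valid_size = n - train_size - test_size      # 1500 examples for validation
assert test_size + train_size + valid_size == n
print(test_size, train_size, valid_size)     # 4500 3000 1500
```

Note that the final write in the hunk appears to store lines_train into {data_type}.validation.jsonl rather than lines_valid, which is consistent with the train and validation LFS pointers above sharing the same oid and size for every domain.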
qg_squadshifts.py CHANGED
@@ -6,14 +6,45 @@ import datasets
  logger = datasets.logging.get_logger(__name__)
  _DESCRIPTION = """[SQuAD Shifts](https://modestyachts.github.io/squadshifts-website/index.html) dataset for question generation (QG) task."""
  _URL = 'https://huggingface.co/datasets/asahi417/qg_squadshift/raw/main/data/processed'
+ # _FILES = {
+ #     str(datasets.Split.TEST): {
+ #         'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(6)],
+ #         'nyt': [f'{_URL}/nyt.test{i:02d}.jsonl' for i in range(7)],
+ #         'reddit': [f'{_URL}/reddit.test{i:02d}.jsonl' for i in range(7)],
+ #         'amazon': [f'{_URL}/amazon.test{i:02d}.jsonl' for i in range(7)]
+ #     },
+ #     str(datasets.Split.TRAIN): {
+ #         'new_wiki': [f'{_URL}/new_wiki.train{i:02d}.jsonl' for i in range(6)],
+ #         'nyt': [f'{_URL}/nyt.train{i:02d}.jsonl' for i in range(7)],
+ #         'reddit': [f'{_URL}/reddit.train{i:02d}.jsonl' for i in range(7)],
+ #         'amazon': [f'{_URL}/amazon.train{i:02d}.jsonl' for i in range(7)]
+ #     },
+ #     str(datasets.Split.VALIDATION): {
+ #         'new_wiki': [f'{_URL}/new_wiki.validation{i:02d}.jsonl' for i in range(6)],
+ #         'nyt': [f'{_URL}/nyt.validation{i:02d}.jsonl' for i in range(7)],
+ #         'reddit': [f'{_URL}/reddit.validation{i:02d}.jsonl' for i in range(7)],
+ #         'amazon': [f'{_URL}/amazon.validation{i:02d}.jsonl' for i in range(7)]
+ #     },
+ # }
  _FILES = {
-     str(datasets.Split.TEST):
-         {
-             'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(6)],
-             'nyt': [f'{_URL}/nyt.test{i:02d}.jsonl' for i in range(7)],
-             'reddit': [f'{_URL}/reddit.test{i:02d}.jsonl' for i in range(7)],
-             'amazon': [f'{_URL}/amazon.test{i:02d}.jsonl' for i in range(7)]
-         }
+     str(datasets.Split.TEST): {
+         'new_wiki': [f'{_URL}/new_wiki.test.jsonl'],
+         'nyt': [f'{_URL}/nyt.test.jsonl'],
+         'reddit': [f'{_URL}/reddit.test.jsonl'],
+         'amazon': [f'{_URL}/amazon.test.jsonl']
+     },
+     str(datasets.Split.TRAIN): {
+         'new_wiki': [f'{_URL}/new_wiki.train.jsonl'],
+         'nyt': [f'{_URL}/nyt.train.jsonl'],
+         'reddit': [f'{_URL}/reddit.train.jsonl'],
+         'amazon': [f'{_URL}/amazon.train.jsonl']
+     },
+     str(datasets.Split.VALIDATION): {
+         'new_wiki': [f'{_URL}/new_wiki.validation.jsonl'],
+         'nyt': [f'{_URL}/nyt.validation.jsonl'],
+         'reddit': [f'{_URL}/reddit.validation.jsonl'],
+         'amazon': [f'{_URL}/amazon.validation.jsonl']
+     },
  }
  _DOMAIN = list(_FILES[list(_FILES.keys())[0]].keys())
 
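
With the sharded test-only files replaced by a single JSONL per split, each domain config of the loader now points at train, validation, and test files. A hedged usage sketch, assuming the datasets library and the repository id used in generate_reference_files.py (the exact split contents depend on the files committed here):

```python
from datasets import load_dataset

# Load one domain config; after this commit each config should expose the
# train/validation/test splits produced by process.py.
dataset = load_dataset('asahi417/qg_squadshifts', 'new_wiki')
print(dataset)                    # DatasetDict with train/validation/test

# Per-example fields include at least 'question', 'answer', 'sentence', and
# 'paragraph_id', as read by generate_reference_files.py above.
print(dataset['test'][0].keys())
```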