Datasets
Tasks: Text Generation
Sub-tasks: language-modeling
Languages: English
Size: 10K - 100K
Tags: question-generation
update
generate_reference_files.py
CHANGED
@@ -1,4 +1,5 @@
 import os
+from glob import glob
 from datasets import load_dataset
 
 os.makedirs('./reference_files', exist_ok=True)
@@ -9,8 +10,9 @@ for split in ['validation', 'test']:
         dataset = load_dataset('asahi417/qg_subjqa', domain, split=split)
         for data in ['question', 'answer', 'sentence', 'paragraph']:
             with open('./reference_files/{}-{}.{}txt'.format(data, split, "" if domain == 'default' else f"{domain}."), 'w') as f:
-                # if data == 'paragraph':
-                #     f.write('\n'.join(dataset['paragraph_id']))
-                # else:
                 f.write('\n'.join(dataset[data]))
+        if domain != 'default':
+            length = [len(open(i).read().split('\n')) for i in glob(f'reference_files/*{split}*{domain}*.txt')]
+            assert len(list(set(length))) == 1, length
+
 
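For reference, below is a minimal sketch of what the updated script looks like once the hunks above are applied. Only the lines shown in the diff are taken from the commit; the domain list and the surrounding loop structure are not visible in this diff and are assumed here for illustration.

import os
from glob import glob
from datasets import load_dataset

os.makedirs('./reference_files', exist_ok=True)

# NOTE: the domain list and the loop nesting are assumptions; they are outside the hunks shown above.
domains = ['default', 'books', 'electronics', 'grocery', 'movies', 'restaurants', 'tripadvisor']

for split in ['validation', 'test']:
    for domain in domains:
        dataset = load_dataset('asahi417/qg_subjqa', domain, split=split)
        for data in ['question', 'answer', 'sentence', 'paragraph']:
            # e.g. question-validation.books.txt, or question-validation.txt for the default config
            with open('./reference_files/{}-{}.{}txt'.format(data, split, "" if domain == 'default' else f"{domain}."), 'w') as f:
                f.write('\n'.join(dataset[data]))
        if domain != 'default':
            # Sanity check added by this commit: every reference file written for this
            # split/domain pair must contain the same number of lines, so the question,
            # answer, sentence, and paragraph files stay aligned row by row.
            length = [len(open(i).read().split('\n')) for i in glob(f'reference_files/*{split}*{domain}*.txt')]
            assert len(list(set(length))) == 1, length

The net effect of the commit is to drop the commented-out paragraph_id branch and to add the glob-based assertion, which fails loudly (printing the per-file line counts) if any generated reference file drifts out of sync with the others.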