import json
from pathlib import Path

import datasets
import pandas as pd
from datasets import Features, Sequence, Value

_CITATION = """"""
_DESCRIPTION = """"""
_HOMEPAGE = ""
_URLS = {
    "questions": "data/questions.json.zip",
    "questions_aux": "data/questions_aux.json.zip",
    "statutes": "data/statutes.tsv.zip",
}
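# Each _URLS archive is expected to extract to a single file named after its config
# (questions.json, questions_aux.json, statutes.tsv), which _generate_examples reads below.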
_CONFIGS = {}
_CONFIGS["questions"] = {
    "description": "Questions about housing law.",
    "features": Features({
        'idx': Value('int32'),
        'state': Value('string'),
        'question': Value('string'),
        'answer': Value('string'),
        'question_group': Value('int32'),
        'statutes': [{
            'statute_idx': Value('int32'),
            'citation': Value('string'),
            'excerpt': Value('string'),
        }],
        'original_question': Value('string'),
        'caveats': Sequence(Value('string')),
    }),
    "license": None,
}
_CONFIGS["questions_aux"] = {
"description": "An auxilliary set of larger questions about housing law, without statutory annotations.",
"features" : Features({
'idx': Value('int32'),
'state': Value('string'),
'question': Value('string'),
'answer': Value('string'),
'question_group': Value('int32'),
'statutes': Sequence({
'citation': Value('string'),
'excerpt': Value('string'),
}),
'original_question': Value('string'),
'caveats': Sequence(Value('string')),
}),
"license": None,
}
_CONFIGS["statutes"] = {
"description": "Corpus of statutes",
"features": Features({
"citation": datasets.Value("string"),
"path": datasets.Value("string"),
"state": datasets.Value("string"),
"text": datasets.Value("string"),
"idx": datasets.Value("int32"),
}),
"license": None,
}
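
# Each entry in _CONFIGS becomes one BuilderConfig of the HousingQA builder below.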
class HousingQA(datasets.GeneratorBasedBuilder):
    """Housing law QA: questions, auxiliary questions, and a supporting statutes corpus."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=task,
            version=datasets.Version("1.0.0"),
            description=_CONFIGS[task]["description"],
        )
        for task in _CONFIGS
    ]

    def _info(self):
        features = _CONFIGS[self.config.name]["features"]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_CONFIGS[self.config.name]["license"],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file_dir = Path(dl_manager.download_and_extract(_URLS[self.config.name]))
        # The statutes corpus is exposed as a single "corpus" split; both question
        # configs are exposed as a single "test" split.
        return [
            datasets.SplitGenerator(
                name="corpus" if self.config.name == "statutes" else "test",
                gen_kwargs={
                    "downloaded_file_dir": downloaded_file_dir,
                    "name": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, downloaded_file_dir, name):
        """Yields examples as (key, example) tuples."""
        if name in ["questions", "questions_aux"]:
            # The question configs ship as a JSON file containing a list of records.
            fpath = downloaded_file_dir / f"{name}.json"
            records = json.loads(fpath.read_text())
            for id_line, record in enumerate(records):
                yield id_line, record
        elif name == "statutes":
            # The statutes corpus ships as a TSV; cast 'idx' to match the declared int32 feature.
            fpath = downloaded_file_dir / f"{name}.tsv"
            data = pd.read_csv(fpath, sep="\t", dtype={"idx": "int32"})
            for id_line, record in enumerate(data.to_dict(orient="records")):
                yield id_line, record
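
# Example usage (a sketch, not part of the loading script itself; the repository id
# below is a placeholder for wherever this script and its data/ archives are hosted):
#
#   from datasets import load_dataset
#
#   questions = load_dataset("<namespace>/housing_qa", "questions", split="test")
#   statutes = load_dataset("<namespace>/housing_qa", "statutes", split="corpus")
#   print(questions[0]["question"], questions[0]["answer"])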