""" Slovene corpus for coreference resolution coref149. """
import os
import xml.etree.ElementTree as ET
import datasets
_CITATION = """\
@article{coref149,
    author={Žitnik, Slavko and Bajec, Marko},
    title={Odkrivanje koreferenčnosti v slovenskem jeziku na označenih besedilih iz coref149},
    journal={Slovenščina 2.0: empirične, aplikativne in interdisciplinarne raziskave},
    number={1},
    volume={6},
    year={2018},
    month={Jun.},
    pages={37–67},
    doi={10.4312/slo2.0.2018.1.37-67}
}
"""
_DESCRIPTION = """\
Slovene corpus for coreference resolution, consisting of 149 documents with manually annotated coreferences.
"""
_HOMEPAGE = "http://hdl.handle.net/11356/1182"
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
_URLS = {
    "coref149": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1182/coref149_v1.0.zip"
}

class Coref149(datasets.GeneratorBasedBuilder):
    """Slovene corpus for coreference resolution."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id_doc": datasets.Value("string"),
                # Tokenized document: one list of words per sentence.
                "words": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                "mentions": [{
                    "id_mention": datasets.Value("string"),
                    "mention_data": {
                        # Index of the sentence containing the mention.
                        "idx_sent": datasets.Value("uint32"),
                        # Word positions inside that sentence.
                        "word_indices": datasets.Sequence(datasets.Value("uint32")),
                        # Word positions inside the whole document.
                        "global_word_indices": datasets.Sequence(datasets.Value("uint32"))
                    }
                }],
                # Clusters of mention IDs that refer to the same entity.
                "coref_clusters": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
            }
        )
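        # A rough sketch of a single yielded example, for orientation; the
        # concrete values below are invented for illustration, not taken from
        # the corpus:
        #   {
        #       "id_doc": "ssj500k.1353.tcf",
        #       "words": [["Janez", "je", "prišel", "."], ["On", "je", "vesel", "."]],
        #       "mentions": [{"id_mention": "ssj500k.1353.tcf.rc_0",
        #                     "mention_data": {"idx_sent": 0,
        #                                      "word_indices": [0],
        #                                      "global_word_indices": [0]}},
        #                    ...],
        #       "coref_clusters": [["ssj500k.1353.tcf.rc_0", "ssj500k.1353.tcf.rc_1"]]
        #   }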
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["coref149"]
        data_dir = dl_manager.download_and_extract(urls)
        # The corpus ships as a single archive with no predefined splits,
        # so all documents are exposed under the train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir
                }
            )
        ]

    def _generate_examples(self, data_dir):
        TC_NAMESPACE = "{http://www.dspin.de/data/textcorpus}"
        # Process the TCF files in a deterministic order, sorting by the
        # numeric part of the file name (i.e. "<name>.<number>.tcf").
        all_files = sorted([fname for fname in os.listdir(data_dir) if fname.endswith(".tcf")],
                           key=lambda _fname: int(_fname.split(".")[-2]))
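        # Sketch of the TCF elements consumed below, inferred from the queries
        # in this method (IDs and token text are invented for illustration):
        #   <tc:token ID="t_0">Janez</tc:token>
        #   <tc:sentence tokenIDs="t_0 t_1 t_2"/>
        #   <tc:entity>
        #       <tc:reference ID="rc_0" tokenIDs="t_0"/>
        #       <tc:reference ID="rc_1" tokenIDs="t_5 t_6"/>
        #   </tc:entity>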
        for idx_file, curr_fname in enumerate(all_files):
            curr_doc = ET.parse(os.path.join(data_dir, curr_fname))
            root = curr_doc.getroot()
            # os.listdir returns bare file names, so the last path component
            # is the file name itself; it serves as the document ID.
            id_doc = curr_fname.split(os.path.sep)[-1]

            # Map token IDs to their text and their global (document-level) position.
            token_tags = root.findall(f".//{TC_NAMESPACE}token")
            id2tok, id2idx, id2globidx, id2sentidx = {}, {}, {}, {}
            for idx_global, token in enumerate(token_tags):
                id_token = token.attrib["ID"]
                text_token = token.text.strip()
                id2tok[id_token] = text_token
                id2globidx[id_token] = idx_global

            # Reconstruct sentences and record each token's sentence index and
            # its local position inside that sentence.
            sent_tags = root.findall(f".//{TC_NAMESPACE}sentence")
            words = []
            for idx_sent, sent in enumerate(sent_tags):
                token_ids = sent.attrib["tokenIDs"].split(" ")
                for local_position, _id_tok in enumerate(token_ids):
                    id2sentidx[_id_tok] = idx_sent
                    id2idx[_id_tok] = local_position
                words.append([id2tok[_id] for _id in token_ids])

            # Each <entity> groups the <reference> mentions of one coreference cluster.
            mentions, clusters = [], []
            for ent in root.findall(f".//{TC_NAMESPACE}entity"):
                curr_cluster = []
                for ref in ent.findall(f"{TC_NAMESPACE}reference"):
                    # Prefix mention IDs with the document ID to keep them
                    # unique across the whole corpus.
                    id_mention = f"{id_doc}.{ref.attrib['ID']}"
                    curr_cluster.append(id_mention)
                    curr_mention = {
                        "id_mention": id_mention,
                        "mention_data": {
                            "idx_sent": None,
                            "word_indices": [],
                            "global_word_indices": []
                        }
                    }
                    for id_token in ref.attrib["tokenIDs"].split(" "):
                        curr_mention["mention_data"]["idx_sent"] = id2sentidx[id_token]
                        curr_mention["mention_data"]["word_indices"].append(id2idx[id_token])
                        curr_mention["mention_data"]["global_word_indices"].append(id2globidx[id_token])
                    mentions.append(curr_mention)
                clusters.append(curr_cluster)

            yield idx_file, {
                "id_doc": id_doc,
                "words": words,
                "mentions": mentions,
                "coref_clusters": clusters
            }
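

if __name__ == "__main__":
    # Minimal usage sketch, assuming this script is saved locally (e.g. as
    # "coref149.py"): older `datasets` releases accept a local loading-script
    # path directly; newer releases may require `trust_remote_code=True` or
    # may no longer support script-based datasets at all. The __main__ guard
    # keeps this from running when the loader imports the module.
    dataset = datasets.load_dataset("coref149.py", split="train")
    example = dataset[0]
    print(example["id_doc"], len(example["words"]), "sentences")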