"""(A publicly available subsample of) a reference corpus of Slovene texts."""
import glob
import logging
import os
import os.path
import re
import xml.etree.ElementTree as ET
from copy import deepcopy
import datasets
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"


def namespace(element):
    """Return the '{...}' namespace prefix of an element's tag, or '' if there is none."""
    # https://stackoverflow.com/a/12946675
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''
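
# For illustration: Gigafida documents are TEI-encoded XML, so for a root element like
# <TEI xmlns="http://www.tei-c.org/ns/1.0"> the helper above returns
# '{http://www.tei-c.org/ns/1.0}' (the namespace URL is assumed here purely as an example).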
_CITATION = """\
@misc{ccGigafida,
title = {Written corpus {ccGigafida} 1.0},
author = {Logar, Nata{\v s}a and Erjavec, Toma{\v z} and Krek, Simon and Gr{\v c}ar, Miha and Holozan, Peter},
url = {http://hdl.handle.net/11356/1035},
note = {Slovenian language resource repository {CLARIN}.{SI}},
copyright = {Creative Commons - Attribution-{NonCommercial}-{ShareAlike} 4.0 International ({CC} {BY}-{NC}-{SA} 4.0)},
issn = {2820-4042},
year = {2013}
}
"""
_DESCRIPTION = """\
The ccGigafida corpus contains a subsample of the Gigafida corpus. The Gigafida corpus is an extensive collection of
Slovene text of various genres, from daily newspapers, magazines, all kinds of books (fiction, non-fiction, textbooks),
web pages, transcriptions of parliamentary debates and similar.
"""
_HOMEPAGE = "http://eng.slovenscina.eu/korpusi/proste-zbirke"
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
_URLS = {
"ccGigafida": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1035/ccGigafidaV1_0.zip"
}


class CcGigafida(datasets.GeneratorBasedBuilder):
    """(A publicly available subsample of) a reference corpus of Slovene texts."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id_doc": datasets.Value("string"),
                "doc_title": datasets.Value("string"),
                "authors": datasets.Sequence(datasets.Value("string")),
                "publish_date": datasets.Value("string"),
                "publisher": datasets.Value("string"),
                "genres": datasets.Sequence(datasets.Value("string")),
                # paragraphs -> sentences -> tokens
                "doc_tokenized": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))),
                # paragraphs -> sentences, as raw strings with whitespace preserved
                "doc_string": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                # paragraphs -> sentence IDs, parallel to the two features above
                "id_sents": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
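
    # Illustrative shape of a single example (the values below are made up):
    #   doc_string    = [["Prvi stavek.", "Drugi stavek."]]                    # paragraphs -> sentences
    #   doc_tokenized = [[["Prvi", "stavek", "."], ["Drugi", "stavek", "."]]]  # paragraphs -> sentences -> tokens
    #   id_sents      = [["F0000001.1.0", "F0000001.1.1"]]                     # hypothetical sentence IDs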

    def _split_generators(self, dl_manager):
        # Allow the user to specify a path to the full Gigafida directory: `load_dataset(..., data_dir=...)`
        if dl_manager.manual_dir is not None:
            data_dir = dl_manager.manual_dir
        else:
            urls = _URLS["ccGigafida"]
            data_dir = dl_manager.download_and_extract(urls)
            data_dir = os.path.join(data_dir, "ccGigafidaV1_0")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir}
            )
        ]

    def _generate_examples(self, data_dir):
        GENRE_MAPPING = {
            "SSJ.T": "tisk", "SSJ.T.K": "tisk/knjižno", "SSJ.T.K.L": "tisk/knjižno/leposlovno",
            "SSJ.T.K.S": "tisk/knjižno/strokovno", "SSJ.T.P": "tisk/periodično", "SSJ.T.P.C": "tisk/periodično/časopis",
            "SSJ.T.P.R": "tisk/periodično/revija", "SSJ.T.D": "tisk/drugo", "SSJ.I": "internet"
        }
        # Genres are prefixed with "ssj:" in Gigafida 2.0
        for genre, description in deepcopy(GENRE_MAPPING).items():
            GENRE_MAPPING[f"ssj:{genre}"] = description

        # Recursively search for XML files in subdirectories; glob with a rooted pattern
        # already returns full paths, so they must not be joined onto `data_dir` again
        all_files = sorted(
            file_path
            for file_path in glob.glob(os.path.join(data_dir, "**", "*.xml"), recursive=True)
            if os.path.isfile(file_path)
        )  # sort to fix the iteration order

        for _idx_file, file_path in enumerate(all_files):
            curr_doc = ET.parse(file_path)
            root = curr_doc.getroot()
            NAMESPACE = namespace(root)
            id_doc = root.attrib[f"{XML_NAMESPACE}id"]

            # Document metadata
            bibl_el = root.find(f".//{NAMESPACE}bibl")
            doc_title = bibl_el.find(f"{NAMESPACE}title").text.strip()
            authors = [_tag.text.strip() for _tag in bibl_el.findall(f"{NAMESPACE}author")]
            publish_date = bibl_el.find(f"{NAMESPACE}date").text.strip()
            publisher = bibl_el.find(f"{NAMESPACE}publisher").text.strip()

            category_tags = root.findall(f".//{NAMESPACE}catRef")
            genres = []
            for _tag in category_tags:
                # In ccGigafida, genres are marked with a "#" prefix
                __tag = _tag.attrib["target"][1:] if _tag.attrib["target"].startswith("#") else _tag.attrib["target"]
                mapped_tag = GENRE_MAPPING.get(__tag, None)
                # In addition to the genre, a document sometimes carries a category assigned by
                # the deduplication tool (dedup:nodup); these have no mapping and are skipped
                if mapped_tag is None:
                    continue
                genres.append(mapped_tag)

            # Build a tokenized and a raw-string version - the raw-string version preserves spaces
            body_tag = root.find(f".//{NAMESPACE}body")
            tokenized_doc, doc_str = [], []
            doc_sent_ids = []
            for para_tag in body_tag.findall(f".//{NAMESPACE}p"):
                id_para = para_tag.attrib[f"{XML_NAMESPACE}id"]
                tokenized_para, para_str = [], []
                para_sent_ids = []

                for _idx_sent, sent_tag in enumerate(para_tag.findall(f".//{NAMESPACE}s")):
                    # ccGigafida has no sentence IDs: construct one from the paragraph ID
                    # plus the sentence's index within the paragraph
                    id_sent = sent_tag.attrib.get(f"{XML_NAMESPACE}id", None)
                    if id_sent is None:
                        id_sent = f"{id_para}.{_idx_sent}"

                    tokenized_sent, str_sent = [], []
                    for child_tag in sent_tag:
                        tag_str = child_tag.tag[len(NAMESPACE):]
                        if tag_str not in {"w", "S", "c", "pc"}:
                            logging.warning(f"Found unexpected tag in a sentence: '{tag_str}', skipping it.")
                            continue

                        # Tag for whitespace in ccGigafida
                        if tag_str == "S":
                            str_sent.append(" ")
                        # Tag for:
                        # - single-letter characters in ccGigafida;
                        # - whitespace in Gigafida
                        elif tag_str == "c":
                            str_sent.append(child_tag.text)
                            if child_tag.text != " ":
                                tokenized_sent.append(child_tag.text)
                        # Word or punctuation character
                        else:
                            str_sent.append(child_tag.text)
                            tokenized_sent.append(child_tag.text)
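
                    # Example of how one sentence is assembled from an (illustrative) TEI fragment:
                    #   <w>Danes</w><S/><w>je</w><c>.</c>
                    #   -> str_sent == "Danes je." and tokenized_sent == ["Danes", "je", "."]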
str_sent = "".join(str_sent)
tokenized_para.append(tokenized_sent)
para_str.append(str_sent)
para_sent_ids.append(id_sent)
tokenized_doc.append(tokenized_para)
doc_str.append(para_str)
doc_sent_ids.append(para_sent_ids)
yield _idx_file, {
"id_doc": id_doc,
"doc_title": doc_title,
"authors": authors,
"publish_date": publish_date,
"publisher": publisher,
"genres": genres,
"doc_tokenized": tokenized_doc,
"doc_string": doc_str,
"id_sents": doc_sent_ids
}
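

# A minimal usage sketch, not part of the original script: the file name and the
# script-loading call are assumptions (dataset scripts require a `datasets` version
# that still supports them). Saving this file as `ccGigafida.py` allows:
if __name__ == "__main__":
    dset = datasets.load_dataset("ccGigafida.py", split="train")  # hypothetical local path
    print(dset[0]["doc_title"])
    # With a local copy of the full Gigafida corpus instead of the download:
    # dset = datasets.load_dataset("ccGigafida.py", data_dir="/path/to/Gigafida", split="train")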