# wmt22_african.py — dataset loading script for the WMT22 African Languages Shared Task
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT22 African Languages Shared Task"""
import datasets
import csv
_CITATION = "" # TODO: add BibTeX citation for the WMT22 African Languages Shared Task
_DESCRIPTION = "" # TODO: add a short description of the mined bitext corpus
_HOMEPAGE = "" # TODO: add the shared-task / dataset homepage URL
_LICENSE = "BSD-License" # TODO: confirm the exact license identifier
# Every supported (source, target) pair, as three-letter language codes (presumably
# ISO 639-3 — TODO confirm). Pairs are alphabetically ordered within each tuple; the
# list drives both the BUILDER_CONFIGS and the per-pair download URLs below.
_LANGUAGE_PAIRS = [('afr', 'eng'), ('afr', 'som'), ('amh', 'eng'), ('amh', 'fra'), ('amh', 'nya'), ('amh', 'orm'), ('amh', 'sna'), ('amh', 'som'), ('amh', 'ssw'), ('amh', 'swh'), ('amh', 'tsn'), ('amh', 'tso'), ('amh', 'umb'), ('amh', 'xho'), ('amh', 'yor'), ('amh', 'zul'), ('eng', 'fuv'), ('eng', 'hau'), ('eng', 'ibo'), ('eng', 'kam'), ('eng', 'kin'), ('eng', 'lin'), ('eng', 'lug'), ('eng', 'luo'), ('eng', 'nso'), ('eng', 'nya'), ('eng', 'orm'), ('eng', 'sna'), ('eng', 'som'), ('eng', 'ssw'), ('eng', 'swh'), ('eng', 'tsn'), ('eng', 'tso'), ('eng', 'umb'), ('eng', 'wol'), ('eng', 'xho'), ('eng', 'yor'), ('eng', 'zul'), ('fra', 'hau'), ('fra', 'ibo'), ('fra', 'kam'), ('fra', 'kin'), ('fra', 'lin'), ('fra', 'lug'), ('fra', 'luo'), ('fra', 'nso'), ('fra', 'nya'), ('fra', 'orm'), ('fra', 'som'), ('fra', 'ssw'), ('fra', 'swh'), ('fra', 'tsn'), ('fra', 'tso'), ('fra', 'umb'), ('fra', 'wol'), ('fra', 'xho'), ('fra', 'zul'), ('fuv', 'hau'), ('fuv', 'ibo'), ('fuv', 'kam'), ('fuv', 'kin'), ('fuv', 'lug'), ('fuv', 'luo'), ('fuv', 'nso'), ('fuv', 'nya'), ('fuv', 'orm'), ('fuv', 'sna'), ('fuv', 'som'), ('fuv', 'ssw'), ('fuv', 'swh'), ('fuv', 'tsn'), ('fuv', 'tso'), ('fuv', 'umb'), ('fuv', 'xho'), ('fuv', 'yor'), ('fuv', 'zul'), ('hau', 'ibo'), ('hau', 'kam'), ('hau', 'kin'), ('hau', 'lug'), ('hau', 'luo'), ('hau', 'nso'), ('hau', 'nya'), ('hau', 'orm'), ('hau', 'sna'), ('hau', 'som'), ('hau', 'ssw'), ('hau', 'swh'), ('hau', 'tsn'), ('hau', 'tso'), ('hau', 'umb'), ('hau', 'xho'), ('hau', 'yor'), ('hau', 'zul'), ('ibo', 'kam'), ('ibo', 'kin'), ('ibo', 'lug'), ('ibo', 'luo'), ('ibo', 'nso'), ('ibo', 'nya'), ('ibo', 'orm'), ('ibo', 'sna'), ('ibo', 'som'), ('ibo', 'ssw'), ('ibo', 'swh'), ('ibo', 'tsn'), ('ibo', 'tso'), ('ibo', 'umb'), ('ibo', 'xho'), ('ibo', 'yor'), ('ibo', 'zul'), ('kam', 'kin'), ('kam', 'lug'), ('kam', 'luo'), ('kam', 'nso'), ('kam', 'nya'), ('kam', 'orm'), ('kam', 'sna'), ('kam', 'som'), ('kam', 'ssw'), ('kam', 'swh'), ('kam', 'tsn'), ('kam', 'tso'), ('kam', 
'umb'), ('kam', 'xho'), ('kam', 'yor'), ('kam', 'zul'), ('kin', 'lug'), ('kin', 'luo'), ('kin', 'nso'), ('kin', 'nya'), ('kin', 'orm'), ('kin', 'sna'), ('kin', 'som'), ('kin', 'ssw'), ('kin', 'swh'), ('kin', 'tsn'), ('kin', 'tso'), ('kin', 'umb'), ('kin', 'xho'), ('kin', 'yor'), ('kin', 'zul'), ('lug', 'luo'), ('lug', 'nso'), ('lug', 'nya'), ('lug', 'orm'), ('lug', 'sna'), ('lug', 'som'), ('lug', 'ssw'), ('lug', 'swh'), ('lug', 'tsn'), ('lug', 'tso'), ('lug', 'umb'), ('lug', 'xho'), ('lug', 'yor'), ('lug', 'zul'), ('luo', 'nso'), ('luo', 'nya'), ('luo', 'orm'), ('luo', 'sna'), ('luo', 'som'), ('luo', 'ssw'), ('luo', 'swh'), ('luo', 'tsn'), ('luo', 'tso'), ('luo', 'umb'), ('luo', 'xho'), ('luo', 'yor'), ('luo', 'zul'), ('nso', 'nya'), ('nso', 'orm'), ('nso', 'sna'), ('nso', 'som'), ('nso', 'ssw'), ('nso', 'swh'), ('nso', 'tsn'), ('nso', 'tso'), ('nso', 'umb'), ('nso', 'xho'), ('nso', 'yor'), ('nso', 'zul'), ('nya', 'orm'), ('nya', 'sna'), ('nya', 'som'), ('nya', 'ssw'), ('nya', 'swh'), ('nya', 'tsn'), ('nya', 'tso'), ('nya', 'umb'), ('nya', 'xho'), ('nya', 'yor'), ('nya', 'zul'), ('orm', 'sna'), ('orm', 'som'), ('orm', 'ssw'), ('orm', 'swh'), ('orm', 'tsn'), ('orm', 'tso'), ('orm', 'umb'), ('orm', 'xho'), ('orm', 'yor'), ('orm', 'zul'), ('sna', 'som'), ('sna', 'ssw'), ('sna', 'swh'), ('sna', 'tsn'), ('sna', 'tso'), ('sna', 'umb'), ('sna', 'xho'), ('sna', 'yor'), ('sna', 'zul'), ('som', 'ssw'), ('som', 'swh'), ('som', 'tsn'), ('som', 'tso'), ('som', 'umb'), ('som', 'wol'), ('som', 'xho'), ('som', 'yor'), ('som', 'zul'), ('ssw', 'swh'), ('ssw', 'tsn'), ('ssw', 'tso'), ('ssw', 'umb'), ('ssw', 'xho'), ('ssw', 'yor'), ('ssw', 'zul'), ('swh', 'tsn'), ('swh', 'tso'), ('swh', 'umb'), ('swh', 'xho'), ('swh', 'yor'), ('swh', 'zul'), ('tsn', 'tso'), ('tsn', 'umb'), ('tsn', 'xho'), ('tsn', 'yor'), ('tsn', 'zul'), ('tso', 'umb'), ('tso', 'xho'), ('tso', 'yor'), ('tso', 'zul'), ('umb', 'xho'), ('umb', 'yor'), ('umb', 'zul'), ('xho', 'yor'), ('xho', 'zul'), ('yor', 'zul')]
# Absolute hub URL kept for reference; the relative base below lets the
# dl_manager resolve files next to this script inside the dataset repo.
# _URL_BASE = "https://huggingface.co/datasets/allenai/wmt22_african/tree/main/"
_URL_BASE = "./"
# One gzipped TSV per language pair, keyed by the "src-tgt" config name.
_URLs = {f"{src_lg}-{trg_lg}": f"{_URL_BASE}wmt22_african_{src_lg}-{trg_lg}.gz" for src_lg, trg_lg in _LANGUAGE_PAIRS}
class Wmt22AfricanTaskConfig(datasets.BuilderConfig):
    """BuilderConfig carrying the source/target language codes of one WMT22 African pair."""

    def __init__(self, src_lg, tgt_lg, **kwargs):
        """Store the pair's language codes; all other kwargs go to BuilderConfig.

        Args:
            src_lg: source language code (e.g. "eng").
            tgt_lg: target language code (e.g. "swh").
        """
        super().__init__(**kwargs)
        self.src_lg = src_lg
        self.tgt_lg = tgt_lg
class Wmt22African(datasets.GeneratorBasedBuilder):
    """WMT22 African Languages Shared Task: mined bitext with LASER and LID scores.

    Each config is one language pair ("src-tgt"); its data file is a gzipped
    tab-separated file with three columns: source sentence, target sentence,
    and a space-separated triple of scores (laser, source LID, target LID).
    """

    # One config per supported pair, named "src-tgt".
    BUILDER_CONFIGS = [
        Wmt22AfricanTaskConfig(
            name=f"{src_lg}-{tgt_lg}",
            version=datasets.Version("1.1.0"),
            description=f"WMT 2022 African Languages: {src_lg} - {tgt_lg}",
            src_lg=src_lg,
            tgt_lg=tgt_lg,
        )
        for (src_lg, tgt_lg) in _LANGUAGE_PAIRS
    ]
    BUILDER_CONFIG_CLASS = Wmt22AfricanTaskConfig

    def _info(self):
        """Return DatasetInfo: a Translation feature plus three float scores per row."""
        features = datasets.Features(
            {
                "translation": datasets.Translation(languages=(self.config.src_lg, self.config.tgt_lg)),
                "laser_score": datasets.Value("float32"),
                "source_sentence_lid": datasets.Value("float32"),
                "target_sentence_lid": datasets.Value("float32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract this config's pair file and expose it as a single TRAIN split."""
        pair = f"{self.config.src_lg}-{self.config.tgt_lg}"  # config key into _URLs
        url = _URLs[pair]
        data_file = dl_manager.download_and_extract(url)  # local path of the extracted file
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "source_lg": self.config.src_lg,
                    "target_lg": self.config.tgt_lg,
                },
            )
        ]

    def _generate_examples(self, filepath, source_lg, target_lg):
        """Yield (line_number, example) pairs parsed from the tab-separated file.

        Raises:
            IndexError: if a line has fewer than 3 tab-separated columns or
                fewer than 3 space-separated scores (after printing the bad row).
            ValueError: if a score field is not parseable as a float.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                datarow = line.split("\t")
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are not intercepted; malformed rows are printed then re-raised.
                try:
                    row = {"translation": {source_lg: datarow[0], target_lg: datarow[1]}}
                    scores = datarow[2].split(" ")  # float() tolerates the trailing newline
                    row["laser_score"] = float(scores[0])
                    row["source_sentence_lid"] = float(scores[1])
                    row["target_sentence_lid"] = float(scores[2])
                    # Replace falsy values with None. NOTE(review): this also maps a
                    # legitimate 0.0 score to None — preserved from the original logic.
                    row = {k: None if not v else v for k, v in row.items()}
                except (IndexError, ValueError):
                    print(datarow)
                    raise
                yield id_, row
# to test the script, go to the root folder of the repo (wmt22_african) and run:
# datasets-cli test wmt22_african --save_infos --all_configs