holylovenia committed
Commit b7cb076 · verified · 1 Parent(s): 8114354

Upload wikimatrix.py with huggingface_hub

Files changed (1)
  1. wikimatrix.py +277 -0
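The commit message above refers to huggingface_hub. Below is a minimal, hypothetical sketch of how such a single-file upload is typically performed with that library; the repo id and local path are assumptions, not recorded in this commit.

# Hypothetical sketch of the upload referenced in the commit message;
# the repo id and local path are assumptions.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="wikimatrix.py",      # local copy of the loader script (assumed path)
    path_in_repo="wikimatrix.py",
    repo_id="SEACrowd/wikimatrix",        # assumed dataset repo id
    repo_type="dataset",
    commit_message="Upload wikimatrix.py with huggingface_hub",
)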
wikimatrix.py ADDED
@@ -0,0 +1,277 @@
from pathlib import Path
from typing import List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
                                      DEFAULT_SOURCE_VIEW_NAME, Licenses,
                                      Tasks)

_DATASETNAME = "wikimatrix"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

# ilo, min, and sun are actually not available.
_LANGUAGES = ["ilo", "min", "jav", "sun", "ceb", "ind", "tgl", "vie"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False
_CITATION = """\
@inproceedings{schwenk-etal-2021-wikimatrix,
    title = "{W}iki{M}atrix: Mining 135{M} Parallel Sentences in 1620 Language Pairs from {W}ikipedia",
    author = "Schwenk, Holger  and
      Chaudhary, Vishrav  and
      Sun, Shuo  and
      Gong, Hongyu  and
      Guzm{\'a}n, Francisco",
    editor = "Merlo, Paola  and
      Tiedemann, Jorg  and
      Tsarfaty, Reut",
    booktitle = "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume",
    month = apr,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.eacl-main.115",
    doi = "10.18653/v1/2021.eacl-main.115",
    pages = "1351--1361",
    abstract = "We present an approach based on multilingual sentence embeddings to automatically extract parallel sentences from the content
    of Wikipedia articles in 96 languages, including several dialects or low-resource languages. We do not limit the extraction process to
    alignments with English, but we systematically consider all possible language pairs. In total, we are able to extract 135M parallel sentences
    for 16720 different language pairs, out of which only 34M are aligned with English. This corpus is freely available. To get an indication
    on the quality of the extracted bitexts, we train neural MT baseline systems on the mined data only for 1886 languages pairs, and evaluate
    them on the TED corpus, achieving strong BLEU scores for many language pairs. The WikiMatrix bitexts seem to be particularly interesting
    to train MT systems between distant languages without the need to pivot through English.",
}
"""

_DESCRIPTION = """\
WikiMatrix consists of parallel sentences automatically extracted from the content of Wikipedia articles in 96 languages, including several
dialects or low-resource languages. 8 of these languages are spoken in the Southeast Asia region. In total, there are 135M parallel sentences
from 1620 different language pairs.
"""

_HOMEPAGE = "https://github.com/facebookresearch/LASER/tree/main/tasks/WikiMatrix"

_LICENSE = Licenses.CC_BY_SA_4_0.value

_URLs = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/WikiMatrix.{lang1}-{lang2}.tsv.gz"

_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

# Language pairs available in WikiMatrix, keyed by the Southeast Asian language of interest.
# The codes below are the Wikipedia language codes used in the WikiMatrix file names.
config = {
    "jv": ["en", "es", "fr", "id", "it", "pt"],
    "ceb": ["bg", "ar", "ca", "cs", "de", "en", "es", "fi", "fr", "hu", "it", "ja", "nl", "no", "pl", "pt", "ro", "ru", "sv", "uk"],
    "id": [
        "jv", "is", "it", "ja", "ko", "lt", "mk", "ml", "mr", "ne", "nl", "no", "pl", "pt", "ro", "ru", "sh", "si", "sk", "sl",
        "sq", "sr", "sv", "sw", "ta", "te", "tl", "tr", "tt", "uk", "vi", "zh", "ar", "az", "ba", "bg", "bn", "bs", "ca", "cs",
        "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "gl", "he", "hi", "hr", "hu",
    ],
    "tl": ["ar", "bg", "bs", "ca", "cs", "da", "de", "el", "en", "eo", "es", "et", "fi", "fr", "gl", "he", "hr", "hu", "id", "it", "ja", "lt", "mk", "nl", "no", "pl", "pt", "ro", "ru", "sh", "sk", "sl", "sq", "sr", "sv", "tr", "uk", "vi", "zh"],
    "vi": [
        "ar", "az", "bg", "bn", "bs", "ca", "cs", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "gl", "he",
        "hi", "hr", "hu", "id", "is", "it", "ja", "ko", "lt", "mk", "ml", "mr", "nl", "no", "pl", "pt", "ro", "ru", "sh", "si",
        "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "tl", "tr", "uk", "zh",
    ],
}

# Build the list of language-pair subsets, naming each pair in alphabetical order
# (e.g. "en-id"), which matches the WikiMatrix file naming scheme.
_SUBSETS = set()
for lang, pairs in config.items():
    for pair in pairs:
        _SUBSETS.add("{}-{}".format(lang, pair) if lang < pair else "{}-{}".format(pair, lang))
_SUBSETS = list(_SUBSETS)


class WikiMatrixDataset(datasets.GeneratorBasedBuilder):
    """WikiMatrix consists of parallel sentences automatically extracted from the content of Wikipedia articles
    in 96 languages, including several dialects or low-resource languages."""

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"wikimatrix_{subset.replace('-', '_')}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="WikiMatrix source schema",
            schema="source",
            subset_id=f"wikimatrix_{subset.replace('-', '_')}",
        )
        for subset in _SUBSETS
    ] + [
        SEACrowdConfig(
            name=f"wikimatrix_{subset.replace('-', '_')}_seacrowd_t2t",
            version=datasets.Version(_SEACROWD_VERSION),
            description="WikiMatrix Nusantara schema",
            schema="seacrowd_t2t",
            subset_id=f"wikimatrix_{subset.replace('-', '_')}",
        )
        for subset in _SUBSETS
    ]

    DEFAULT_CONFIG_NAME = "wikimatrix_en_id_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text_1": datasets.Value("string"),
                    "text_2": datasets.Value("string"),
                    "text_1_name": datasets.Value("string"),
                    "text_2_name": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Config names look like "wikimatrix_<lang1>_<lang2>_<schema>", so the language pair sits at indices 1 and 2.
        lang1, lang2 = self.config.name.split("_")[1], self.config.name.split("_")[2]
        filepath = Path(dl_manager.download_and_extract(_URLs.format(lang1=lang1, lang2=lang2)))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepath},
            ),
        ]

    def _generate_examples(self, filepath: Path):
        with open(filepath, "r", encoding="utf-8") as f:
            data = f.readlines()

        lang1, lang2 = self.config.name.split("_")[1], self.config.name.split("_")[2]
        if self.config.schema == "source":
            for _id, line in enumerate(data):
                # Each TSV row holds the alignment score followed by the two sentences.
                line = line.strip().split("\t")
                ex = {
                    "id": str(_id),
                    "text_1": line[1],
                    "text_2": line[2],
                    "text_1_name": lang1,
                    "text_2_name": lang2,
                }
                yield _id, ex

        elif self.config.schema == "seacrowd_t2t":
            for _id, line in enumerate(data):
                line = line.strip().split("\t")
                ex = {
                    "id": str(_id),
                    "text_1": line[1],
                    "text_2": line[2],
                    "text_1_name": lang1,
                    "text_2_name": lang2,
                }
                yield _id, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
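
The loader above can be consumed like any script-based dataset. Below is a minimal usage sketch, assuming a local copy of wikimatrix.py and the datasets library; the script path and config choice are assumptions. Config names follow the wikimatrix_<lang1>_<lang2>_<schema> pattern built from _SUBSETS (for example, the jv/en pair becomes wikimatrix_en_jv_source), trust_remote_code is needed on recent datasets versions for dataset scripts, and only a test split is defined.

import datasets

ds = datasets.load_dataset(
    "wikimatrix.py",                  # path to a local copy of this loader script (assumed)
    name="wikimatrix_en_id_source",   # source schema for the en-id pair
    split="test",                     # the loader only defines a test split
    trust_remote_code=True,           # required by recent datasets versions for dataset scripts
)
print(ds[0])  # {"id": "0", "text_1": "...", "text_2": "...", "text_1_name": "en", "text_2_name": "id"}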