ArXiv: 2309.07445
License: cc-by-sa-4.0
holylovenia committed (verified)
Commit 92d03b1 · 1 Parent(s): c17778e

Upload sib_200.py with huggingface_hub

Files changed (1): sib_200.py (+244, -0)

sib_200.py ADDED
@@ -0,0 +1,244 @@
+ # coding=utf-8
+ # Copyright 2024 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ SIB-200 is the largest publicly available topic classification dataset based on Flores-200, covering 205 languages and dialects.
+ Train/validation/test splits are available for all 205 languages.
+ """
+
+ import os
+ from pathlib import Path
+ from typing import List, Tuple, Dict
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Tasks, Licenses
+
+ _CITATION = """\
+ @misc{adelani2023sib200,
+     title={SIB-200: A Simple, Inclusive, and Big Evaluation Dataset for Topic Classification in 200+ Languages and Dialects},
+     author={David Ifeoluwa Adelani and Hannah Liu and Xiaoyu Shen and Nikita Vassilyev and Jesujoba O. Alabi and Yanke Mao and Haonan Gao and Annie En-Shiun Lee},
+     year={2023},
+     eprint={2309.07445},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DATASETNAME = "sib_200"
+
+ _DESCRIPTION = """\
+ SIB-200 is the largest publicly available topic classification dataset based on Flores-200, covering 205 languages and dialects.
+ Train/validation/test splits are available for all 205 languages.
+ """
+
+ _HOMEPAGE = "https://github.com/dadelani/sib-200"
+
+ _LANGUAGES = [
+     "ace",
+     "ban",
+     "bjn",
+     "bug",
+     "ceb",
+     "ilo",
+     "ind",
+     "jav",
+     "kac",
+     "khm",
+     "lao",
+     "lus",
+     "min",
+     "mya",
+     "pag",
+     "shn",
+     "sun",
+     "tgl",
+     "tha",
+     "vie",
+     "war",
+     "zsm",
+ ]
+
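+ # Flores-200 language_script codes for the SEA subset: 25 codes covering the 22 languages in
+ # _LANGUAGES above (ace, bjn, and min are provided in both Arabic and Latin scripts).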
+ _SUPPORTED_LANGUAGE_CODES = [
+     "ace_Arab",
+     "ace_Latn",
+     "ban_Latn",
+     "bjn_Arab",
+     "bjn_Latn",
+     "bug_Latn",
+     "ceb_Latn",
+     "ilo_Latn",
+     "ind_Latn",
+     "jav_Latn",
+     "kac_Latn",
+     "khm_Khmr",
+     "lao_Laoo",
+     "lus_Latn",
+     "min_Arab",
+     "min_Latn",
+     "mya_Mymr",
+     "pag_Latn",
+     "shn_Mymr",
+     "sun_Latn",
+     "tgl_Latn",
+     "tha_Thai",
+     "vie_Latn",
+     "war_Latn",
+     "zsm_Latn",
+ ]
+
+ _LICENSE = Licenses.CC_BY_SA_4_0.value
+
+ _LOCAL = False
+
+ # This can be an arbitrarily nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "base_url": "https://huggingface.co/datasets/Davlan/sib200/raw/main/data"
+ }
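+ # At download time the loader fetches "<base_url>/<language_code>/<split>.tsv", e.g.
+ # https://huggingface.co/datasets/Davlan/sib200/raw/main/data/ind_Latn/train.tsv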
+
+ _SUPPORTED_TASKS = [Tasks.TOPIC_MODELING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+ _SEACROWD_SCHEMA = "seacrowd_text"
+
+
+ def _sib_config_constructor(lang: str, schema: str = _SEACROWD_SCHEMA, version: str = _SEACROWD_VERSION) -> SEACrowdConfig:
+     """Build a SEACrowdConfig for a single language code under the given schema."""
+     return SEACrowdConfig(
+         name=f"{_DATASETNAME}_{lang}_{schema}",
+         version=version,
+         description=f"SIB-200 {schema} schema",
+         schema=schema,
+         subset_id=f"SIB-200 {lang}",
+     )
+
+
+ class Sib200Dataset(datasets.GeneratorBasedBuilder):
+     """
+     SIB-200 is the largest publicly available topic classification dataset based on Flores-200, covering 205 languages and dialects.
+     Train/validation/test splits are available for all 205 languages.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     # Evaluated once at class-definition time to build BUILDER_CONFIGS.
+     def _populate_configs():
+         # One source-schema and one seacrowd_text-schema config per supported language code.
+         configs = [_sib_config_constructor(lang, schema="source", version=_SOURCE_VERSION) for lang in _SUPPORTED_LANGUAGE_CODES]
+         configs += [_sib_config_constructor(lang, schema=_SEACROWD_SCHEMA, version=_SEACROWD_VERSION) for lang in _SUPPORTED_LANGUAGE_CODES]
+
+         all_lang_source_config = SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=_SOURCE_VERSION,
+             description="SIB-200 source schema",
+             schema="source",
+             subset_id="SIB-200 SEA",
+         )
+
+         all_lang_text_config = SEACrowdConfig(
+             name=f"{_DATASETNAME}_{_SEACROWD_SCHEMA}",
+             version=_SEACROWD_VERSION,
+             description=f"SIB-200 {_SEACROWD_SCHEMA} schema",
+             schema=_SEACROWD_SCHEMA,
+             subset_id="SIB-200 SEA",
+         )
+
+         configs.append(all_lang_source_config)
+         configs.append(all_lang_text_config)
+         return configs
+
+     BUILDER_CONFIGS = _populate_configs()
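+     # Resulting config names follow "sib_200_{lang}_{schema}", e.g. "sib_200_ind_Latn_source" or
+     # "sib_200_ind_Latn_seacrowd_text"; the SEA-wide aggregate configs are "sib_200_source" and
+     # "sib_200_seacrowd_text".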
+
+     DEFAULT_CONFIG_NAME = "sib_200_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "index_id": datasets.Value("int64"),
+                     "text": datasets.Value("string"),
+                     "category": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_text":
+             features = schemas.text_features(["geography", "science/technology", "health", "travel", "entertainment", "politics", "sports"])
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # dl_manager downloads the per-language TSV files from the Hugging Face Hub.
+         # subset_id is "SIB-200 <language_code>" for per-language configs or "SIB-200 SEA" for the aggregate configs.
+         lang = self.config.subset_id.split(" ")[-1]
+         if lang in _SUPPORTED_LANGUAGE_CODES:
+             train_paths = [Path(dl_manager.download_and_extract(f"{_URLS['base_url']}/{lang}/train.tsv"))]
+             valid_paths = [Path(dl_manager.download_and_extract(f"{_URLS['base_url']}/{lang}/dev.tsv"))]
+             test_paths = [Path(dl_manager.download_and_extract(f"{_URLS['base_url']}/{lang}/test.tsv"))]
+             lang_codes = [lang]
+         elif lang == "SEA":
+             train_paths, valid_paths, test_paths, lang_codes = [], [], [], []
+             for lang_code in _SUPPORTED_LANGUAGE_CODES:
+                 train_paths.append(Path(dl_manager.download_and_extract(f"{_URLS['base_url']}/{lang_code}/train.tsv")))
+                 valid_paths.append(Path(dl_manager.download_and_extract(f"{_URLS['base_url']}/{lang_code}/dev.tsv")))
+                 test_paths.append(Path(dl_manager.download_and_extract(f"{_URLS['base_url']}/{lang_code}/test.tsv")))
+                 lang_codes.append(lang_code)
+         else:
+             raise ValueError(f"Language {lang} is not a supported SEA language in this dataset")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"file_paths": train_paths, "lang_codes": lang_codes}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"file_paths": valid_paths, "lang_codes": lang_codes}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"file_paths": test_paths, "lang_codes": lang_codes}
+             )
+         ]
+
+     def _generate_examples(self, file_paths: List[str], lang_codes: List[str]) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         index = 0
+         for file_path, lang_code in zip(file_paths, lang_codes):
+             # Each TSV has the columns index_id, text, and category.
+             lang_df = pd.read_csv(file_path, sep='\t')
+             for row in lang_df.itertuples():
+                 if self.config.schema == "source":
+                     example = {
+                         "index_id": row.index_id,
+                         "text": row.text,
+                         "category": row.category,
+                     }
+
+                 elif self.config.schema == "seacrowd_text":
+                     example = {
+                         "id": f'{lang_code}_{row.index_id}',
+                         "text": row.text,
+                         "label": row.category,
+                     }
+                 yield index, example
+                 index += 1
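
A minimal usage sketch (not part of the committed file): it assumes the script above is saved locally as sib_200.py, that the seacrowd package is installed so its imports resolve, and a datasets version that supports trust_remote_code.

from datasets import load_dataset

# "sib_200_ind_Latn_seacrowd_text" is one of the per-language configs built by _populate_configs();
# "sib_200_source" (the default) and "sib_200_seacrowd_text" load all 25 SEA language codes at once.
sib = load_dataset("sib_200.py", name="sib_200_ind_Latn_seacrowd_text", trust_remote_code=True)
print(sib["train"][0])   # {"id": "ind_Latn_...", "text": "...", "label": "..."}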