holylovenia committed 199aa31 (verified) · 1 parent: 4b4f9d8

Upload openslr.py with huggingface_hub

Files changed (1)
  1. openslr.py +258 -0
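The commit message states the file was uploaded with huggingface_hub. Below is a minimal sketch of such an upload; the repo id and local path are illustrative assumptions, not taken from this commit.

from huggingface_hub import HfApi

api = HfApi()
# Upload the local loader script into a dataset repository on the Hub.
api.upload_file(
    path_or_fileobj="openslr.py",   # local file to upload (assumed path)
    path_in_repo="openslr.py",      # destination path inside the repo
    repo_id="SEACrowd/openslr",     # illustrative repo id
    repo_type="dataset",
    commit_message="Upload openslr.py with huggingface_hub",
)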
openslr.py ADDED
@@ -0,0 +1,258 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{kjartansson18_sltu,
    author={Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
    title={{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
    year=2018,
    booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
    pages={52--55},
    doi={10.21437/SLTU.2018-11}
}
"""

_DATASETNAME = "openslr"
_DESCRIPTION = """\
This dataset contains transcribed high-quality audio in Javanese, Sundanese, Burmese, and Khmer. \
The data come from three different projects under the OpenSLR initiative.
"""

_HOMEPAGE = "https://www.openslr.org/resources.php"

_LANGUAGES = ["mya", "jav", "sun", "khm"]

_LICENSE = Licenses.CC_BY_SA_4_0.value

_LOCAL = False

_RESOURCES = {
    "SLR35": {
        "language": "jav",
        "files": [
            "asr_javanese_0.zip",
            "asr_javanese_1.zip",
            "asr_javanese_2.zip",
            "asr_javanese_3.zip",
            "asr_javanese_4.zip",
            "asr_javanese_5.zip",
            "asr_javanese_6.zip",
            "asr_javanese_7.zip",
            "asr_javanese_8.zip",
            "asr_javanese_9.zip",
            "asr_javanese_a.zip",
            "asr_javanese_b.zip",
            "asr_javanese_c.zip",
            "asr_javanese_d.zip",
            "asr_javanese_e.zip",
            "asr_javanese_f.zip",
        ],
        "index_files": ["asr_javanese/utt_spk_text.tsv"] * 16,
        "data_dirs": ["asr_javanese/data"] * 16,
    },
    "SLR36": {
        "language": "sun",
        "files": [
            "asr_sundanese_0.zip",
            "asr_sundanese_1.zip",
            "asr_sundanese_2.zip",
            "asr_sundanese_3.zip",
            "asr_sundanese_4.zip",
            "asr_sundanese_5.zip",
            "asr_sundanese_6.zip",
            "asr_sundanese_7.zip",
            "asr_sundanese_8.zip",
            "asr_sundanese_9.zip",
            "asr_sundanese_a.zip",
            "asr_sundanese_b.zip",
            "asr_sundanese_c.zip",
            "asr_sundanese_d.zip",
            "asr_sundanese_e.zip",
            "asr_sundanese_f.zip",
        ],
        "index_files": ["asr_sundanese/utt_spk_text.tsv"] * 16,
        "data_dirs": ["asr_sundanese/data"] * 16,
    },
    "SLR41": {
        "language": "jav",
        "files": ["jv_id_female.zip", "jv_id_male.zip"],
        "index_files": ["jv_id_female/line_index.tsv", "jv_id_male/line_index.tsv"],
        "data_dirs": ["jv_id_female/wavs", "jv_id_male/wavs"],
    },
    "SLR42": {
        "language": "khm",
        "files": ["km_kh_male.zip"],
        "index_files": ["km_kh_male/line_index.tsv"],
        "data_dirs": ["km_kh_male/wavs"],
    },
    "SLR44": {
        "language": "sun",
        "files": ["su_id_female.zip", "su_id_male.zip"],
        "index_files": ["su_id_female/line_index.tsv", "su_id_male/line_index.tsv"],
        "data_dirs": ["su_id_female/wavs", "su_id_male/wavs"],
    },
    "SLR80": {
        "language": "mya",
        "files": ["my_mm_female.zip"],
        "index_files": ["line_index.tsv"],
        "data_dirs": [""],
    },
}
_URLS = {_DATASETNAME: "https://openslr.org/resources/{subset}"}
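# For example, for config "openslr_SLR41_jav_source" the "{subset}" placeholder resolves to "41",
# so the first file listed above downloads from https://openslr.org/resources/41/jv_id_female.zip
# (see _split_generators below).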

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class OpenSLRDataset(datasets.GeneratorBasedBuilder):
    """This dataset contains transcribed high-quality audio in Javanese, Sundanese, Burmese, and Khmer.
    The data come from three different projects under the OpenSLR initiative."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_{_RESOURCES[subset]['language']}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        )
        for subset in _RESOURCES.keys()
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_{_RESOURCES[subset]['language']}_seacrowd_sptext",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_sptext",
            subset_id=f"{_DATASETNAME}",
        )
        for subset in _RESOURCES.keys()
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_SLR41_jav_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=48_000),
                    "sentence": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # Config names look like "openslr_SLR41_jav_source"; the second token is the OpenSLR subset ID.
        subset = self.config.name.split("_")[1]
        urls = [f"{_URLS[_DATASETNAME].format(subset=subset[3:])}/{file}" for file in _RESOURCES[subset]["files"]]
        data_dir = dl_manager.download_and_extract(urls)

        path_to_indexs = [os.path.join(path, f"{_RESOURCES[subset]['index_files'][i]}") for i, path in enumerate(data_dir)]
        path_to_datas = [os.path.join(path, f"{_RESOURCES[subset]['data_dirs'][i]}") for i, path in enumerate(data_dir)]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": [path_to_indexs, path_to_datas],
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        subset = self.config.name.split("_")[1]
        path_to_indexs, path_to_datas = filepath[0], filepath[1]
        counter = -1
        if subset in ["SLR35", "SLR36"]:
            # The index file maps each utterance filename to its speaker id and transcript.
            sentence_index = {}
            for i, path_to_index in enumerate(path_to_indexs):
                with open(path_to_index, encoding="utf-8") as f:
                    lines = f.readlines()
                    for id_, line in enumerate(lines):
                        field_values = re.split(r"\t\t?", line.strip())
                        filename, user_id, sentence = field_values
                        sentence_index[filename] = (user_id, sentence)
                for path_to_data in sorted(Path(path_to_datas[i]).rglob("*.flac")):
                    filename = path_to_data.stem
                    if path_to_data.stem not in sentence_index:
                        continue
                    path = str(path_to_data.resolve())
                    # Look up the speaker id and transcript recorded for this utterance.
                    user_id, sentence = sentence_index[filename]
                    counter += 1
                    if self.config.schema == "source":
                        example = {"path": path, "audio": path, "sentence": sentence}
                    elif self.config.schema == "seacrowd_sptext":
                        example = {
                            "id": counter,
                            "path": path,
                            "audio": path,
                            "text": sentence,
                            "speaker_id": user_id,
                            "metadata": {
                                "speaker_age": None,
                                "speaker_gender": None,
                            },
                        }
                    yield counter, example
        else:
            for i, path_to_index in enumerate(path_to_indexs):
                gender = "female" if "female" in path_to_index else "male"
                with open(path_to_index, encoding="utf-8") as f:
                    lines = f.readlines()
                    for id_, line in enumerate(lines):
                        # The following regexes normalise the lines, since the index files
                        # are not always consistent and contain formatting bugs:
                        line = re.sub(r"\t[^\t]*\t", "\t", line.strip())
                        field_values = re.split(r"\t\t?", line)
                        if len(field_values) != 2:
                            continue
                        filename, sentence = field_values
                        path = os.path.join(path_to_datas[i], f"{filename}.wav")
                        counter += 1
                        if self.config.schema == "source":
                            example = {"path": path, "audio": path, "sentence": sentence}
                        elif self.config.schema == "seacrowd_sptext":
                            example = {
                                "id": counter,
                                "path": path,
                                "audio": path,
                                "text": sentence,
                                "speaker_id": None,
                                "metadata": {
                                    "speaker_age": None,
                                    "speaker_gender": gender,
                                },
                            }
                        yield counter, example
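For reference, a minimal sketch of loading one of the configs defined above with the datasets library. It assumes the script is available locally as openslr.py, that the seacrowd utilities it imports are installed, and that your datasets version requires trust_remote_code for script-based datasets; these are assumptions about the caller's environment, not part of this commit.

import datasets

# Load the Javanese SLR41 subset in the source schema defined by this script.
ds = datasets.load_dataset(
    "openslr.py",                      # local path to the loader script (assumed)
    name="openslr_SLR41_jav_source",   # one of the generated config names
    trust_remote_code=True,            # needed by recent datasets versions for dataset scripts
)
print(ds["train"][0]["sentence"])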