holylovenia committed on
Commit 3768563 · verified · 1 Parent(s): 7611ca4

Upload mswc.py with huggingface_hub

Files changed (1)
  1. mswc.py +219 -0
mswc.py ADDED
@@ -0,0 +1,219 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{mazumder2021mswc,
+     author = {Mazumder, Mark and Chitlangia, Sharad and Banbury, Colby and Kang, Yiping and Ciro, Juan and Achorn, Keith and Galvez, Daniel and Sabini, Mark and Mattson, Peter and Kanter, David and Diamos, Greg and Warden, Pete and Meyer, Josh and Janapa Reddi, Vijay},
+     booktitle = {Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks},
+     editor = {J. Vanschoren and S. Yeung},
+     pages = {},
+     publisher = {Curran},
+     title = {Multilingual Spoken Words Corpus},
+     url = {https://datasets-benchmarks-proceedings.neurips.cc/paper_files/paper/2021/file/fe131d7f5a6b38b23cc967316c13dae2-Paper-round2.pdf},
+     volume = {1},
+     year = {2021}
+ }
+ """
+
+ _DATASETNAME = "mswc"
+
+ _DESCRIPTION = """\
+ Multilingual Spoken Words Corpus is a large and growing audio dataset of spoken words in 50 languages collectively spoken by over 5 billion people, for academic research and commercial applications in keyword spotting and spoken term search.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/MLCommons/ml_spoken_words"
+
+ _LANGUAGES = ["cnh", "ind", "vie"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+ _LANGUAGE_NAME_MAP = {
+     "cnh": "cnh",
+     "ind": "id",
+     "vie": "vi",
+ }
+
+ _FORMATS = ["wav", "opus"]
+
+ _LICENSE = Licenses.CC_BY_4_0.value
+
+ _LOCAL = False
+
+ _URLS = "https://huggingface.co/datasets/MLCommons/ml_spoken_words/resolve/refs%2Fconvert%2Fparquet/{lang}_{format}/{split}/0000.parquet?download=true"
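+ # _URLS resolves against the Hub's auto-converted parquet export
+ # (refs/convert/parquet) of MLCommons/ml_spoken_words; {lang} takes the
+ # Hub-side codes from _LANGUAGE_NAME_MAP rather than the ISO 639-3 codes.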
+
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+ _SUPPORTED_SCHEMA_STRINGS = [f"seacrowd_{str(TASK_TO_SCHEMA[task]).lower()}" for task in _SUPPORTED_TASKS]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class MSWC(datasets.GeneratorBasedBuilder):
+     """
+     Multilingual Spoken Words Corpus is a large and growing audio dataset of spoken words in 50 languages collectively spoken by over 5 billion people, for academic research and commercial applications in keyword spotting and spoken term search.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = []
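+     # One source-schema config and one seacrowd-schema config are generated
+     # per (language, audio format) pair, e.g. "mswc_ind_wav_source".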
+
+     for language in _LANGUAGES:
+         for format in _FORMATS:
+             subset_id = f"{_DATASETNAME}_{language}_{format}"
+             BUILDER_CONFIGS.append(
+                 SEACrowdConfig(name=f"{subset_id}_source", version=SOURCE_VERSION, description=f"{_DATASETNAME} source schema", schema="source", subset_id=subset_id),
+             )
+
+     seacrowd_schema_config: List[SEACrowdConfig] = []
+
+     for seacrowd_schema in _SUPPORTED_SCHEMA_STRINGS:
+         for language in _LANGUAGES:
+             for format in _FORMATS:
+                 subset_id = f"{_DATASETNAME}_{language}_{format}"
+                 seacrowd_schema_config.append(
+                     SEACrowdConfig(
+                         name=f"{subset_id}_{seacrowd_schema}",
+                         version=SEACROWD_VERSION,
+                         description=f"{_DATASETNAME} {seacrowd_schema} schema",
+                         schema=seacrowd_schema,
+                         subset_id=subset_id,
+                     )
+                 )
+
+     BUILDER_CONFIGS.extend(seacrowd_schema_config)
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_{_LANGUAGES[0]}_{_FORMATS[0]}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         _, _, format = str(self.config.subset_id).split("_")
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "is_valid": datasets.Value("bool"),
+                     "language": datasets.ClassLabel(num_classes=3),
+                     "speaker_id": datasets.Value("string"),
+                     "gender": datasets.ClassLabel(num_classes=4),
+                     "keyword": datasets.Value("string"),
+                     "audio": datasets.Audio(decode=False, sampling_rate=16000 if format == "wav" else 48000),
+                 }
+             )
+
+         elif self.config.schema == f"seacrowd_{str(TASK_TO_SCHEMA[Tasks.SPEECH_RECOGNITION]).lower()}":
+             features = schemas.speech_text_features
+
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         split_names = ["train", "validation", "test"]
+
+         result = []
+
+         _, language, format = str(self.config.subset_id).split("_")
+
+         # A single parquet shard (0000.parquet) is downloaded per split.
+         for split_name in split_names:
+             path = dl_manager.download_and_extract(_URLS.format(split=split_name, lang=_LANGUAGE_NAME_MAP[language], format=format))
+
+             result.append(
+                 datasets.SplitGenerator(
+                     name=split_name,
+                     gen_kwargs={
+                         "path": path,
+                         "split": split_name,
+                         "language": language,
+                         "format": format,
+                     },
+                 ),
+             )
+
+         return result
+
+     def _generate_examples(self, path: Path, split: str, language: str, format: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         idx = 0
+
+         if self.config.schema == "source":
+             df = pd.read_parquet(path)
+
+             for _, row in df.iterrows():
+                 yield idx, row.to_dict()
+                 idx += 1
+
+         elif self.config.schema == f"seacrowd_{str(TASK_TO_SCHEMA[Tasks.SPEECH_RECOGNITION]).lower()}":
+             df = pd.read_parquet(path)
+
+             base_folder = os.path.dirname(path)
+             base_folder = os.path.join(base_folder, _DATASETNAME, language, format, split)
+             os.makedirs(base_folder, exist_ok=True)
+
+             audio_paths = []
+
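+             # Each parquet row embeds the raw audio bytes; write every clip to
+             # disk so the schema's "path" field points at a real local file.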
+             for _, row in df.iterrows():
+                 audio_dict = row["audio"]
+                 file_name = audio_dict["path"]
+
+                 audio_path = os.path.join(base_folder, file_name)
+                 audio_dict["path"] = audio_path
+
+                 with open(audio_path, "wb") as f:
+                     f.write(audio_dict["bytes"])
+
+                 audio_paths.append(audio_path)
+
+             df.rename(columns={"label": "text"}, inplace=True)
+
+             df["path"] = audio_paths
+             df["id"] = df.index + idx
+
+             # Keyword clips carry no transcript, so the schema's "text" field is
+             # left empty; speaker age is unavailable and recorded as 0.
+             df = df.assign(text="").astype({"text": "str"})
+             df = df.assign(metadata=[{"speaker_age": 0, "speaker_gender": gender} for gender in df["gender"]]).astype({"metadata": "object"})
+
+             df.drop(columns=["file", "is_valid", "language", "gender", "keyword"], inplace=True)
+
+             for _, row in df.iterrows():
+                 yield idx, row.to_dict()
+                 idx += 1
+
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
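
For reference, a minimal usage sketch (assumptions: the script is saved locally as mswc.py, the seacrowd package is importable, and the installed datasets version accepts trust_remote_code):

    import datasets

    # Load the Indonesian wav subset with the source schema; the config name
    # follows the naming scheme defined in BUILDER_CONFIGS above.
    dset = datasets.load_dataset("mswc.py", name="mswc_ind_wav_source", trust_remote_code=True)
    print(dset["train"][0]["keyword"])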