Languages: Indonesian
holylovenia committed · Commit 02987a3 · verified · 1 Parent(s): 9e2f82c

Upload indonglish.py with huggingface_hub

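As the commit message says, the file was pushed with huggingface_hub. For context, a commit like this is typically produced by something along the lines of the following sketch (the repo_id is a placeholder, not taken from this page):

    from huggingface_hub import HfApi

    api = HfApi()
    # Upload the loading script to the dataset repo in a single commit.
    api.upload_file(
        path_or_fileobj="indonglish.py",
        path_in_repo="indonglish.py",
        repo_id="<namespace>/indonglish",  # placeholder
        repo_type="dataset",
        commit_message="Upload indonglish.py with huggingface_hub",
    )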
Files changed (1)
  1. indonglish.py +216 -0
indonglish.py ADDED
@@ -0,0 +1,216 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import csv
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @article{Astuti2023,
+   title = {Code-Mixed Sentiment Analysis using Transformer for Twitter Social Media Data},
+   journal = {International Journal of Advanced Computer Science and Applications},
+   doi = {10.14569/IJACSA.2023.0141053},
+   url = {http://dx.doi.org/10.14569/IJACSA.2023.0141053},
+   year = {2023},
+   publisher = {The Science and Information Organization},
+   volume = {14},
+   number = {10},
+   author = {Laksmita Widya Astuti and Yunita Sari and Suprapto}
+ }
+ """
+
+ _DATASETNAME = "indonglish"
+ _DESCRIPTION = """\
+ The Indonglish dataset was constructed from keywords derived from the
+ sociolinguistic phenomenon observed among teenagers in South Jakarta. It was
+ designed for the semantic task of sentiment analysis, with three label
+ categories: positive, negative, and neutral. The dataset was annotated by a
+ panel of five annotators, each with expertise in language and data science.
+ """
+
+ _HOMEPAGE = "https://github.com/laksmitawidya/indonglish-dataset"
+ _LANGUAGES = ["ind"]
+ _LICENSE = Licenses.UNKNOWN.value
+ _LOCAL = False
+
+ _URLS = {
+     "skenario-orig": {
+         "train": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario-ori/train.csv",
+         "validation": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario-ori/validation.csv",
+         "test": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario-ori/test.csv",
+     },
+     "skenario1": {
+         "train": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario1/training.csv",
+         "validation": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario1/validation.csv",
+         "test": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario1/test.csv",
+     },
+     "skenario2": {
+         "train": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario2/training.csv",
+         "validation": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario2/validation.csv",
+         "test": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario2/test.csv",
+     },
+     "skenario3": {
+         "train": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario3/training.csv",
+         "validation": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario3/validation.csv",
+         "test": "https://raw.githubusercontent.com/laksmitawidya/indonglish-dataset/master/skenario3/test.csv",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
+
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class Indonglish(datasets.GeneratorBasedBuilder):
+     """Indonglish dataset for sentiment analysis from https://github.com/laksmitawidya/indonglish-dataset."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SEACROWD_SCHEMA_NAME = "text"
+     _LABELS = ["Positif", "Negatif", "Netral"]
+
+     # One config pair (source schema + SEACrowd text schema) for the original
+     # split, plus one pair per scenario (skenario1-3), registered below.
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+     for i in range(1, 4):
+         BUILDER_CONFIGS += [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_skenario{i}_source",
+                 version=SOURCE_VERSION,
+                 description=f"{_DATASETNAME} source schema",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_skenario{i}",
+             ),
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_skenario{i}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+                 version=SEACROWD_VERSION,
+                 description=f"{_DATASETNAME} SEACrowd schema",
+                 schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+                 subset_id=f"{_DATASETNAME}_skenario{i}",
+             ),
+         ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tweet": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=self._LABELS),
+                 }
+             )
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.text_features(self._LABELS)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # Configs named e.g. "indonglish_skenario1_source" select that
+         # scenario's splits; all other configs use the original split.
+         if "skenario" in self.config.name:
+             setting = self.config.name.split("_")[1]
+         else:
+             setting = "skenario-orig"
+
+         data_paths = {
+             setting: {
+                 "train": Path(dl_manager.download_and_extract(_URLS[setting]["train"])),
+                 "validation": Path(dl_manager.download_and_extract(_URLS[setting]["validation"])),
+                 "test": Path(dl_manager.download_and_extract(_URLS[setting]["test"])),
+             }
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_paths[setting]["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_paths[setting]["test"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_paths[setting]["validation"],
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         # Read the CSV file and drop the header row; each remaining row is
+         # (tweet, label).
+         with open(filepath, "r", encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(csv_file)
+             csv_data = [row for row in csv_reader]
+         csv_data = csv_data[1:]  # remove header
+
+         num_sample = len(csv_data)
+
+         for i in range(num_sample):
+             if self.config.schema == "source":
+                 example = {
+                     "id": str(i),
+                     "tweet": csv_data[i][0],
+                     "label": csv_data[i][1],
+                 }
+             elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+                 example = {
+                     "id": str(i),
+                     "text": csv_data[i][0],
+                     "label": csv_data[i][1],
+                 }
+
+             yield i, example
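
For a quick sanity check, the script can be loaded with the `datasets` library. This is a minimal sketch, not part of the commit: it assumes the file above is saved locally as indonglish.py, that the seacrowd package (which provides the imported schema helpers) is installed, and a `datasets` version that still supports script-based loading (e.g. 2.x, where trust_remote_code=True is required):

    import datasets

    # Load the default config ("indonglish_source") from the local script.
    # Pass e.g. name="indonglish_skenario1_seacrowd_text" for a
    # scenario-specific subset in the SEACrowd text schema.
    ds = datasets.load_dataset(
        "indonglish.py",
        name="indonglish_source",
        trust_remote_code=True,
    )

    print(ds)              # DatasetDict with train/validation/test splits
    print(ds["train"][0])  # e.g. {"id": "0", "tweet": "...", "label": 0}

Note that in the source schema the label is a ClassLabel over ["Positif", "Negatif", "Netral"], so examples carry integer label ids rather than the raw strings from the CSV.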