Languages: Indonesian
License: cc-by-4.0
holylovenia committed (verified)
Commit 5cdb77d · 1 Parent(s): 129049c

Upload spamid_pair.py with huggingface_hub

Files changed (1)
  1. spamid_pair.py +160 -0
spamid_pair.py ADDED
@@ -0,0 +1,160 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @article{Chrismanto2022,
+ title = {SPAMID-PAIR: A Novel Indonesian Post–Comment Pairs Dataset Containing Emoji},
+ journal = {International Journal of Advanced Computer Science and Applications},
+ doi = {10.14569/IJACSA.2022.0131110},
+ url = {http://dx.doi.org/10.14569/IJACSA.2022.0131110},
+ year = {2022},
+ publisher = {The Science and Information Organization},
+ volume = {13},
+ number = {11},
+ author = {Antonius Rachmat Chrismanto and Anny Kartika Sari and Yohanes Suyanto}
+ }
+ """
+
+ _DATASETNAME = "spamid_pair"
+
+
+ _DESCRIPTION = """\
+ SPAMID-PAIR is a dataset of post-comment pairs collected from 13 selected Indonesian public figures (artists) /
+ public accounts, each with more than 15 million followers and categorized as famous artists.
+ It was collected from Instagram using an online tool and Selenium.
+ Two expert annotators labeled all 72,874 post-comment pairs.
+ The data contains Unicode (UTF-8) text and emojis scraped from posts and comments, without account profile information.
+ """
+
+ _HOMEPAGE = "https://data.mendeley.com/datasets/fj5pbdf95t/1"
+
+ _LANGUAGES = ["ind"]
+
+
+ _LICENSE = Licenses.CC_BY_4_0.value
+
+ _LOCAL = False
+
+
+ _URLS = {
+     _DATASETNAME: "https://prod-dcd-datasets-cache-zipfiles.s3.eu-west-1.amazonaws.com/fj5pbdf95t-1.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.INTENT_CLASSIFICATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class SpamidPairDataset(datasets.GeneratorBasedBuilder):
+     """SPAMID-PAIR is a dataset of post-comment pairs collected from 13 selected Indonesian public figures (famous artists) / public accounts, each with more than 15 million followers."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     LABEL_CLASSES = [1, 0]
+
+     SEACROWD_SCHEMA_NAME = "text"
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "igid": datasets.Value("string"),
+                     "comment": datasets.Value("string"),
+                     "posting": datasets.Value("string"),
+                     "spam": datasets.ClassLabel(names=self.LABEL_CLASSES),
+                 }
+             )
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.text_features(self.LABEL_CLASSES)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = Path(dl_manager.download_and_extract(urls))
+         data_dir = os.path.join(data_dir, "SPAMID-PAIR", "Raw", "dataset-raw.xlsx")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         data = pd.read_excel(filepath)
+
+         if self.config.schema == "source":
+             for i, row in data.iterrows():
+                 yield i, {
+                     "igid": str(row["igid"]),
+                     "comment": str(row["comment"]),
+                     "posting": str(row["posting"]),
+                     "spam": row["spam"],
+                 }
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             for i, row in data.iterrows():
+                 yield i, {
+                     "id": str(i),
+                     "text": str(row["comment"]) + "\n" + str(row["posting"]),
+                     "label": int(row["spam"]),
+                 }
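
Usage sketch: one minimal way to load this script locally with the datasets library, assuming the seacrowd utilities are installed, the Mendeley download URL is still reachable, and openpyxl is available so pandas can read the .xlsx file. The config names follow BUILDER_CONFIGS above; the script defines a single train split.

    from datasets import load_dataset

    # Source schema: igid / comment / posting / spam, as declared in _info().
    source = load_dataset("spamid_pair.py", name="spamid_pair_source", trust_remote_code=True)

    # SEACrowd text schema: id / text / label, where text is comment + "\n" + posting.
    seacrowd = load_dataset("spamid_pair.py", name="spamid_pair_seacrowd_text", trust_remote_code=True)

    print(source["train"][0])

Note that recent versions of datasets require trust_remote_code=True to execute script-based loaders such as this one.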