from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks

_CITATION = """\
@inproceedings{inproceedings,
author = {Alfina, Ika and Mulia, Rio and Fanany, Mohamad Ivan and Ekanata, Yudo},
year = {2017},
month = {10},
pages = {},
title = {Hate Speech Detection in the Indonesian Language: A Dataset and Preliminary Study},
doi = {10.1109/ICACSIS.2017.8355039}
}
"""

_LOCAL = False
_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_DATASETNAME = "id_hatespeech"

_DESCRIPTION = """\
The ID Hatespeech dataset is collection of 713 tweets related to a political event, the Jakarta Governor Election 2017
designed for hate speech detection NLP task. This dataset is crawled from Twitter, and then filtered
and annotated manually. The dataset labelled into two; HS if the tweet contains hate speech and Non_HS if otherwise
"""

_HOMEPAGE = "https://www.researchgate.net/publication/320131169_Hate_Speech_Detection_in_the_Indonesian_Language_A_Dataset_and_Preliminary_Study"
_LICENSE = "Unknown"
_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/ialfina/id-hatespeech-detection/master/IDHSD_RIO_unbalanced_713_2017.txt",
}
_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class IdHatespeech(datasets.GeneratorBasedBuilder):
    """The ID Hatespeech dataset is collection of tweets related to a political event, the Jakarta Governor Election 2017
    designed for hate speech detection NLP task."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="id_hatespeech_source",
            version=SOURCE_VERSION,
            description="ID Hatespeech source schema",
            schema="source",
            subset_id="id_hatespeech",
        ),
        SEACrowdConfig(
            name="id_hatespeech_seacrowd_text",
            version=SEACROWD_VERSION,
            description="ID Hatespeech Nusantara schema",
            schema="seacrowd_text",
            subset_id="id_hatespeech",
        ),
    ]

    DEFAULT_CONFIG_NAME = "id_hatespeech_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features({"tweet": datasets.Value("string"), "label": datasets.Value("string")})
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["Non_HS", "HS"])
        else:
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # The dataset has no predetermined split, so all examples go into TRAIN.
        urls = _URLS[_DATASETNAME]
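        # The URL points to a single .txt file, so download_and_extract simply downloads it
        # and returns the local file path (there is nothing to extract).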
        filepath = Path(dl_manager.download_and_extract(urls))
        data_files = {"train": filepath}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_files["train"],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        # The dataset has no id column; the row index is used as the id.
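        # The raw file is a tab-separated table (a label column followed by the tweet text);
        # reset_index() surfaces the integer row index as a column, and the columns are then
        # renamed to id/label/tweet. ISO-8859-1 is used presumably because the upstream file
        # is not valid UTF-8.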
        df = pd.read_csv(filepath, sep="\t", encoding="ISO-8859-1").reset_index()
        df.columns = ["id", "label", "tweet"]

        if self.config.schema == "source":
            for row in df.itertuples():
                ex = {
                    "tweet": row.tweet,
                    "label": row.label,
                }
                yield row.id, ex

        elif self.config.schema == "seacrowd_text":
            for row in df.itertuples():
                ex = {
                    "id": str(row.id),
                    "text": row.tweet,
                    "label": row.label,
                }
                yield row.id, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
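
# A minimal usage sketch (illustrative only, not part of the loader): assuming this script is
# saved locally as id_hatespeech.py and the seacrowd utilities are importable, the dataset can
# be loaded through the `datasets` library. Depending on the installed `datasets` version,
# trust_remote_code=True may be required for local loading scripts.
#
#   import datasets
#
#   dset = datasets.load_dataset("id_hatespeech.py", name="id_hatespeech_seacrowd_text", split="train")
#   print(dset[0])  # {"id": "0", "text": "...", "label": ...}; label is a ClassLabel index (0 = Non_HS, 1 = HS)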