holylovenia committed · commit 4d85d65 · verified · 1 parent: 56da773

Upload mtop_intent_classification.py with huggingface_hub

Files changed (1)
  1. mtop_intent_classification.py +135 -0
mtop_intent_classification.py ADDED
@@ -0,0 +1,135 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple

import datasets

from seacrowd.sea_datasets.mtop_intent_classification.labels import (
    DOMAIN_LABELS, INTENT_LABELS)
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{li-etal-2021-mtop,
    author = {Li, Haoran and Arora, Abhinav and Chen, Shuohui and Gupta, Anchit and Gupta, Sonal and Mehdad, Yashar},
    title = {MTOP: A Comprehensive Multilingual Task-Oriented Semantic Parsing Benchmark},
    booktitle = {Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume},
    publisher = {Association for Computational Linguistics},
    year = {2021},
    url = {https://aclanthology.org/2021.eacl-main.257},
    doi = {10.18653/v1/2021.eacl-main.257},
    pages = {2950--2962},
}
"""
_LOCAL = False
_LANGUAGES = ["tha"]
_DATASETNAME = "mtop_intent_classification"
_DESCRIPTION = """
This dataset contains annotated utterances in 6 languages, including Thai,
for semantic parsing. Queries corresponding to the chosen domains are crowdsourced.
Two subsets are included in this dataset: 'domain' (e.g. 'news', 'people', 'weather')
and 'intent' (e.g. 'GET_MESSAGE', 'STOP_MUSIC', 'END_CALL').
"""

_HOMEPAGE = "https://huggingface.co/mteb"
_LICENSE = Licenses.CC_BY_SA_4_0.value  # Found in original dataset (not HF) linked in paper
_URL = "https://huggingface.co/datasets/mteb/"


_SUPPORTED_TASKS = [Tasks.INTENT_CLASSIFICATION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class MTOPIntentClassificationDataset(datasets.GeneratorBasedBuilder):
    """Dataset of Thai sentences and their domains or intents."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    SUBSETS = ["domain", "intent"]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema for {subset} subset",
            schema="source",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema for {subset} subset",
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_domain_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("int32"),
                    "label_text": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_text":
            # subset_id has the form "mtop_intent_classification_<subset>",
            # so compare against its suffix rather than the full id.
            subset = self.config.subset_id.split("_")[-1]
            if subset == "domain":
                labels = DOMAIN_LABELS
            elif subset == "intent":
                labels = INTENT_LABELS
            else:
                raise ValueError(f"Received unexpected config name {self.config.name}")
            features = schemas.text_features(label_names=labels)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # dl_manager is not used since this dataloader relies on HF `load_dataset`
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"split": split._name})
            for split in (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
        ]

    def _load_hf_data_from_remote(self, split: str) -> datasets.DatasetDict:
        """Load dataset from HuggingFace."""
        subset = self.config.subset_id.split("_")[-1]
        if subset not in ("domain", "intent"):
            raise ValueError(f"Received unexpected config name {self.config.name}")
        # _URL ends with a slash, so this resolves to e.g. "mteb/mtop_domain".
        HF_REMOTE_REF = "/".join(_URL.split("/")[-2:]) + f"mtop_{subset}"
        _hf_dataset_source = datasets.load_dataset(HF_REMOTE_REF, "th", split=split)
        return _hf_dataset_source

    def _generate_examples(self, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        data = self._load_hf_data_from_remote(split=split)
        for index, row in enumerate(data):
            if self.config.schema == "source":
                example = row

            elif self.config.schema == "seacrowd_text":
                example = {"id": str(index), "text": row["text"], "label": row["label_text"]}
            yield index, example
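
For context, a minimal usage sketch of the two schemas defined above. This is not part of the commit: the local script path is a placeholder, the config names follow the `BUILDER_CONFIGS` in the file, and it assumes a `datasets` version that still supports script-based loading (where `trust_remote_code=True` may be required).

import datasets

# Hypothetical local path to this script; adjust to wherever the file lives.
SCRIPT = "mtop_intent_classification.py"

# Source schema for the 'domain' subset: rows keep the original
# id/text/label/label_text fields pulled from mteb/mtop_domain ("th" config).
domain_src = datasets.load_dataset(
    SCRIPT,
    name="mtop_intent_classification_domain_source",
    split="train",
    trust_remote_code=True,
)

# SEACrowd text schema for the 'intent' subset: rows are re-mapped to
# {"id", "text", "label"}, with label names drawn from INTENT_LABELS.
intent_txt = datasets.load_dataset(
    SCRIPT,
    name="mtop_intent_classification_intent_seacrowd_text",
    split="train",
    trust_remote_code=True,
)
print(intent_txt[0])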