wannaphong committed on
Commit
4579cd3
·
1 Parent(s): 63f15eb

Add ThaiCulturaX_loading_script.py

Browse files
Files changed (1) hide show
  1. ThaiCulturaX_loading_script.py +81 -0
ThaiCulturaX_loading_script.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """South East Asia mC4 dataset."""
2
+ import gzip
3
+ import json
4
+ import datasets
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+ _DESCRIPTION = """
8
+ Thai CulturaX Clean dataset."""
9
+ _CITATION = """EMPTY"""
10
+ _URL = "EMPTY"
11
+ _DATA_URL = "https://huggingface.co/datasets/pythainlp/thai-culturax-clean-dataset/tree/main/data/data.{index:02d}.json"
12
+ _LICENSE="odc-by"
13
+
14
+
15
class ThaiCulturaXConfig(datasets.BuilderConfig):
    """BuilderConfig for the Thai CulturaX Clean dataset.

    Args:
        languages: language selector stored on the config; defaults to
            "Thai". NOTE(review): despite the plural name, the default is a
            single string, not a list — confirm intended type with callers.
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, *args, languages="Thai", **kwargs):
        """Store ``languages`` and pass everything else to the base config."""
        super().__init__(*args, **kwargs)
        self.languages = languages
29
+
30
+
31
class ThaiCulturaX(datasets.GeneratorBasedBuilder):
    """ThaiCulturaX clean dataset builder."""

    BUILDER_CONFIGS = [
        # Fixed typo: "anguages" -> "languages". The misspelled keyword fell
        # through **kwargs into datasets.BuilderConfig.__init__, which raised
        # TypeError at import time.
        ThaiCulturaXConfig(languages="Thai", version=datasets.Version("1.0.0"))
    ]
    BUILDER_CONFIG_CLASS = ThaiCulturaXConfig

    def _info(self):
        """Return dataset metadata: each example has string "text" and "meta"."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the 53 train shards (data.00 .. data.52) and return splits."""
        data_urls = {}
        for split in ["train"]:
            data_urls[split] = [
                _DATA_URL.format(
                    index=index,
                )
                for index in range(0, 53)
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs, one JSON object per input line.

        NOTE(review): shards are opened with gzip even though _DATA_URL ends
        in ".json" (no ".gz") — confirm the hosted files are gzip-compressed.
        """
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:  # skip empty lines between JSON records
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1