ShaomuTan committed on
Commit 24d8527 · 1 Parent(s): 5eb49de

Create ec40.py

Files changed (1)
  1. ec40.py +143 -0
ec40.py ADDED
"""EC40: an English-centric multilingual machine translation dataset covering 40 non-English languages."""

import datasets

_CITATION = """\
@inproceedings{tan2023towards,
    author = "Tan, Shaomu and Monz, Christof",
    title = "Towards a Better Understanding of Variations in Zero-Shot Neural Machine Translation Performance",
    booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
    year = "2023",
}
"""

_DESCRIPTION = """\
EC40 is an English-centric multilingual machine translation dataset: every supervised training pair includes English on either the source or the target side. It covers 40 non-English languages, and the non-English directions are reserved for zero-shot evaluation.
"""

_HOMEPAGE = "https://github.com/Smu-Tan/ZS-NMT-Variations/tree/main"

_LICENSE = "cc-by-4.0"


# The 40 non-English languages covered by EC40.
_langs = [
    'af', 'am', 'ar', 'ast', 'be', 'bg', 'bn', 'bs', 'ca', 'cs',
    'da', 'de', 'es', 'fr', 'gu', 'ha', 'he', 'hi', 'is', 'it',
    'kab', 'kn', 'lb', 'mr', 'mt', 'ne', 'nl', 'no', 'oc', 'pl',
    'pt', 'ro', 'ru', 'sd', 'so', 'sr', 'sv', 'ti', 'uk', 'ur',
]

# Supervised pairs: English on the source side, paired with every other language.
_En_centric_Pairs = ['en-' + lang for lang in _langs]

# Zero-shot pairs: every ordered pair of non-English languages.
_ZS_Pairs = [src + '-' + tgt for src in _langs for tgt in _langs if src != tgt]
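# Together, these lists define 40 supervised (English-centric) configurations
# and 40 * 39 = 1,560 zero-shot configurations, i.e. 1,600 pairs in total.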

class EC40Config(datasets.BuilderConfig):
    """BuilderConfig for one EC40 language pair."""

    def __init__(self, language_pair, **kwargs):
        """
        Args:
            language_pair: the language pair to load, e.g. "en-de".
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.language_pair = language_pair


class EC40(datasets.GeneratorBasedBuilder):
    """EC40 is English-centric, meaning that all training pairs include English on either the source or target side."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = EC40Config
    BUILDER_CONFIGS = [
        EC40Config(name=pair, description=None, language_pair=pair)
        for pair in _En_centric_Pairs + _ZS_Pairs
    ]

    def _info(self):
        src_tag, tgt_tag = self.config.language_pair.split("-")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=(src_tag, tgt_tag))}
            ),
            supervised_keys=(src_tag, tgt_tag),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        lang_pair = self.config.language_pair
        src_tag, tgt_tag = lang_pair.split("-")

        # Each split is a pair of aligned plain-text files, one sentence per line:
        # training data from the EC40 corpus, validation from NTREX, test from Flores-200.
        train_src = dl_manager.download_and_extract(f"EC40-train-set/{lang_pair}.{src_tag}")
        train_tgt = dl_manager.download_and_extract(f"EC40-train-set/{lang_pair}.{tgt_tag}")

        valid_src = dl_manager.download_and_extract(f"Ntrex-eval-set/{lang_pair}.{src_tag}")
        valid_tgt = dl_manager.download_and_extract(f"Ntrex-eval-set/{lang_pair}.{tgt_tag}")

        test_src = dl_manager.download_and_extract(f"Flores200-test-set/en-test-set/{lang_pair}.{src_tag}")
        test_tgt = dl_manager.download_and_extract(f"Flores200-test-set/en-test-set/{lang_pair}.{tgt_tag}")

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_src, "labelpath": train_tgt}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_src, "labelpath": valid_tgt}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_src, "labelpath": test_tgt}),
        ]

    def _generate_examples(self, filepath, labelpath):
        """Yields aligned source/target sentence pairs."""
        src_tag, tgt_tag = self.config.language_pair.split("-")
        with open(filepath, encoding="utf-8") as f:
            src = f.read().split("\n")
        with open(labelpath, encoding="utf-8") as f:
            tgt = f.read().split("\n")
        # Drop the empty string left by a trailing newline, consistently on both sides.
        if src and src[-1] == "":
            src = src[:-1]
        if tgt and tgt[-1] == "":
            tgt = tgt[:-1]
        for idx, (s, t) in enumerate(zip(src, tgt)):
            yield idx, {"translation": {src_tag: s, tgt_tag: t}}
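
For reference, a minimal usage sketch, assuming a version of the datasets library that still supports loading scripts and that this file is saved locally as ec40.py; the "en-de" configuration name is one of the 1,600 pairs defined above:

import datasets

# Load one direction through the script above; any name from BUILDER_CONFIGS works.
# "ec40.py" is an assumed local path to this loading script.
ec40 = datasets.load_dataset("ec40.py", "en-de")

# Each example is a {"translation": {src_tag: ..., tgt_tag: ...}} dict.
example = ec40["train"][0]["translation"]
print(example["en"], "->", example["de"])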