mskrt committed
Commit 0ac491c · verified · 1 Parent(s): d6cc8a6

Upload PAIR.py

Files changed (1):
  1. PAIR.py +149 -79
PAIR.py CHANGED
@@ -13,13 +13,10 @@
 # limitations under the License.
 
 
-import csv
 import json
-import os
 
 import datasets
 
-
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -52,95 +49,168 @@ _URLS = {
 
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class PAIRDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-    ]
-
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+class PAIRDataset(datasets.GeneratorBasedBuilder):
+    """PAIRDataset."""
 
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
+        """_info."""
+        return datasets.DatasetInfo(
+            description="My custom dataset.",
+            features=datasets.Features(
+                {
+                    "annotation_type": datasets.Value("string"),
+                    "sequence": datasets.Value("string"),
+                    "pid": datasets.Value("string"),
+                    "annotation": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+        )
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        # urls = _URLS[self.config.name]
-        # data_dir = dl_manager.download_and_extract(urls)
+        """_split_generators.
+
+        Parameters
+        ----------
+        dl_manager :
+            dl_manager
+        """
+        # Implement logic to download and extract data files.
+        # For simplicity, assume data_files is a dict with paths to your data.
+        data_files = {
+            "train": "train.json",
+            "test": "test.json",
+        }
         return [
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.TRAIN,
-            #     # These kwargs will be passed to _generate_examples
-            ### gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "train.json"),
-            #         "split": "train",
-            #     },
-            # ),
+            # datasets.SplitGenerator(
+            #     name=datasets.Split.TRAIN,
+            #     gen_kwargs={"filepath": data_files["train"]},
+            # ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": "test.json",  # os.path.join(data_dir, "test.json"),
-                    "split": "test"
-                },
+                gen_kwargs={"filepath": data_files["test"]},
             ),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+    def _generate_examples(self, filepath):
+        """_generate_examples.
+
+        Parameters
+        ----------
+        filepath :
+            filepath
+        """
+        # The file is a single JSON object keyed by annotation type.
         with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if data['content'] != [None]:
-                    yield key, {
-                        "sequence": data["seq"],
-                        "pid": data["pid"],
-                        # "content": "" if split == "test" else data["answer"],
-                        "content": data['content'][0],
-                    }
+            data = json.load(f)
+            counter = 0
+            for annotation_type, samples in data.items():
+                # Parse each record into the appropriate fields.
+                for elem in samples:
+                    if elem["content"] != [None]:
+                        # Keys must be unique across the split, so yield the
+                        # running counter; yielding annotation_type here would
+                        # repeat keys and raise a duplicate-keys error.
+                        yield counter, {
+                            "annotation_type": annotation_type,
+                            "sequence": elem["seq"],
+                            "pid": elem["pid"],
+                            "annotation": elem["content"][0],
+                        }
+                        counter += 1
+
+
+# class PAIRDataset(datasets.GeneratorBasedBuilder):
+#     """TODO: Short description of my dataset."""
+#
+#     VERSION = datasets.Version("1.1.0")
+#
+#     # This is an example of a dataset with multiple configurations.
+#     # If you don't want/need to define several sub-sets in your dataset,
+#     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+#
+#     # If you need to make complex sub-parts in the datasets with configurable options
+#     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
+#     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+#
+#     # You will be able to load one or the other configurations in the following list with
+#     # data = datasets.load_dataset('my_dataset', 'first_domain')
+#     # data = datasets.load_dataset('my_dataset', 'second_domain')
+#     BUILDER_CONFIGS = [
+#         datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+#     ]
+#
+#     DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+#
+#     def _info(self):
+#         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
+#         if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+#             features = datasets.Features(
+#                 {
+#                     "sentence": datasets.Value("string"),
+#                     "option1": datasets.Value("string"),
+#                     "answer": datasets.Value("string")
+#                     # These are the features of your dataset like images, labels ...
+#                 }
+#             )
+#
+#         return datasets.DatasetInfo(
+#             # This is the description that will appear on the datasets page.
+#             description=_DESCRIPTION,
+#             # This defines the different columns of the dataset and their types
+#             features=features,  # Here we define them above because they are different between the two configurations
+#             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+#             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+#             # supervised_keys=("sentence", "label"),
+#             # Homepage of the dataset for documentation
+#             homepage=_HOMEPAGE,
+#             # License for the dataset if available
+#             license=_LICENSE,
+#             # Citation for the dataset
+#             citation=_CITATION,
+#         )
+#
+#     def _split_generators(self, dl_manager):
+#         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+#         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+#
+#         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+#         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+#         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+#         # urls = _URLS[self.config.name]
+#         # data_dir = dl_manager.download_and_extract(urls)
+#         return [
+#             # datasets.SplitGenerator(
+#             #     name=datasets.Split.TRAIN,
+#             #     # These kwargs will be passed to _generate_examples
+#             ### gen_kwargs={
+#             #         "filepath": os.path.join(data_dir, "train.json"),
+#             #         "split": "train",
+#             #     },
+#             # ),
+#             datasets.SplitGenerator(
+#                 name=datasets.Split.TEST,
+#                 # These kwargs will be passed to _generate_examples
+#                 gen_kwargs={
+#                     "filepath": "test.json",  # os.path.join(data_dir, "test.json"),
+#                     "split": "test"
+#                 },
+#             ),
+#         ]
+#
+#     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+#     def _generate_examples(self, filepath, split):
+#         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+#         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+#         with open(filepath, encoding="utf-8") as f:
+#             for key, row in enumerate(f):
+#                 data = json.loads(row)
+#                 if data['content'] != [None]:
+#                     yield key, {
+#                         "sequence": data["seq"],
+#                         "pid": data["pid"],
+#                         # "content": "" if split == "test" else data["answer"],
+#                         "content": data['content'][0],
+#                     }