mskrt committed
Commit 703f076 · verified · 1 Parent(s): 42dc314

Upload PAIR.py

Files changed (1)
  1. PAIR.py +11 -98
PAIR.py CHANGED
@@ -49,19 +49,24 @@ _URLS = {
 
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-
+annotation2type = {"names": datasets.Value("string"),
+                   "EC": datasets.Sequence(datasets.Value("string")),
+                   }
 
 class PAIRDataset(datasets.GeneratorBasedBuilder):
     """PAIRDataset."""
-
+    def __init__(self, *args, annotation_type=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.annotation_type = annotation_type  # Save the custom argument for later use
+        print(self.annotation_type)
+        exit()
     def _info(self):
         """_info."""
         return datasets.DatasetInfo(
             description="My custom dataset.",
             features=datasets.Features(
                 {
-                    "EC": datasets.Sequence(datasets.Value("string")),
-                    "names": datasets.Value("string"),
+                    self.annotation_type: annotation2type[self.annotation_type],
                     "sequence": datasets.Value("string"),
                     "pid": datasets.Value("string"),
                 }
@@ -109,12 +114,13 @@ class PAIRDataset(datasets.GeneratorBasedBuilder):
             data = json.load(f)
             counter = 0
             for idx, annotation_type in enumerate(data.keys()):
+                if annotation_type != self.annotation_type: continue
                 # Parse your line into the appropriate fields
                 samples = data[annotation_type]
                 for idx_2, elem in enumerate(samples):
                     # example = parse_line_to_example(line)
                     if elem["content"] != [None]:
-                        unique_id = f"{elem['pid']}_{idx_2}"
+                        unique_id = f"{elem['pid']}_{idx}"
                         content = elem["content"][0]
                         # print(literal_eval(content), "done")
                         yield unique_id, {
@@ -126,97 +132,4 @@ class PAIRDataset(datasets.GeneratorBasedBuilder):
 
 
                             #"annotation_type": annotation_type,
-                            #"annotation": content,
-# class PAIRDataset(datasets.GeneratorBasedBuilder):
-#     """TODO: Short description of my dataset."""
-#
-#     VERSION = datasets.Version("1.1.0")
-#
-#     # This is an example of a dataset with multiple configurations.
-#     # If you don't want/need to define several sub-sets in your dataset,
-#     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-#
-#     # If you need to make complex sub-parts in the datasets with configurable options
-#     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-#     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-#
-#     # You will be able to load one or the other configurations in the following list with
-#     # data = datasets.load_dataset('my_dataset', 'first_domain')
-#     # data = datasets.load_dataset('my_dataset', 'second_domain')
-#     BUILDER_CONFIGS = [
-#         datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-#     ]
-#
-#     DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-#
-#     def _info(self):
-#         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-#         if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-#             features = datasets.Features(
-#                 {
-#                     "sentence": datasets.Value("string"),
-#                     "option1": datasets.Value("string"),
-#                     "answer": datasets.Value("string")
-#                     # These are the features of your dataset like images, labels ...
-#                 }
-#             )
-#
-#         return datasets.DatasetInfo(
-#             # This is the description that will appear on the datasets page.
-#             description=_DESCRIPTION,
-#             # This defines the different columns of the dataset and their types
-#             features=features,  # Here we define them above because they are different between the two configurations
-#             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-#             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-#             # supervised_keys=("sentence", "label"),
-#             # Homepage of the dataset for documentation
-#             homepage=_HOMEPAGE,
-#             # License for the dataset if available
-#             license=_LICENSE,
-#             # Citation for the dataset
-#             citation=_CITATION,
-#         )
-#
-#     def _split_generators(self, dl_manager):
-#         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-#         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-#
-#         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-#         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-#         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-#         # urls = _URLS[self.config.name]
-#         # data_dir = dl_manager.download_and_extract(urls)
-#         return [
-#             # datasets.SplitGenerator(
-#             #     name=datasets.Split.TRAIN,
-#             #     # These kwargs will be passed to _generate_examples
-#             ###     gen_kwargs={
-#             #         "filepath": os.path.join(data_dir, "train.json"),
-#             #         "split": "train",
-#             #     },
-#             # ),
-#             datasets.SplitGenerator(
-#                 name=datasets.Split.TEST,
-#                 # These kwargs will be passed to _generate_examples
-#                 gen_kwargs={
-#                     "filepath": "test.json",  # os.path.join(data_dir, "test.json"),
-#                     "split": "test"
-#                 },
-#             ),
-#         ]
-#
-#     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-#     def _generate_examples(self, filepath, split):
-#         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-#         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-#         with open(filepath, encoding="utf-8") as f:
-#             for key, row in enumerate(f):
-#                 data = json.loads(row)
-#                 if data['content'] != [None]:
-#                     yield key, {
-#                         "sequence": data["seq"],
-#                         "pid": data["pid"],
-#                         # "content": "" if split == "test" else data["answer"],
-#                         "content": data['content'][0],
-#                     }
 
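In the `datasets` library, extra keyword arguments passed to `load_dataset` are forwarded to the builder, which is how the new `annotation_type` argument reaches the custom `__init__` above. A minimal usage sketch, assuming a hypothetical repo id of `mskrt/PAIR`, and noting that the committed `__init__` still ends in a debug `print`/`exit()` pair, so any load will stop there until those two lines are removed:

    from datasets import load_dataset

    # "mskrt/PAIR" is a placeholder; substitute the actual repo id.
    # trust_remote_code=True is required to run a script-based dataset.
    ds = load_dataset("mskrt/PAIR", annotation_type="EC", trust_remote_code=True)
    print(ds["test"][0])  # the script appears to define only a test split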
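One behavioral note on the key change in `_generate_examples`: after the new `continue` filter, `idx` is the same for every yielded example, so switching the key from `f"{elem['pid']}_{idx_2}"` to `f"{elem['pid']}_{idx}"` makes uniqueness rest entirely on `pid`, and `datasets` raises `DuplicatedKeysError` when keys repeat. A sketch of the same loop keeping both indices in the key; the yielded field names are assumptions, since the diff truncates the `yield` body:

    import json

    def _generate_examples(self, filepath, split):
        """Sketch of the committed loop with a collision-proof example key."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for idx, annotation_type in enumerate(data.keys()):
            if annotation_type != self.annotation_type:
                continue  # keep only the annotation type requested at load time
            for idx_2, elem in enumerate(data[annotation_type]):
                if elem["content"] != [None]:
                    # idx_2 in the key guards against repeated pids
                    yield f"{elem['pid']}_{idx}_{idx_2}", {
                        "pid": elem["pid"],
                        "sequence": elem["sequence"],  # assumed field name
                        self.annotation_type: elem["content"][0],
                    }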