Luka-Wang committed on
Commit c0eb116 · 1 Parent(s): 92f2cb3

Update coco.py

Files changed (1)
  1. coco.py +33 -115
coco.py CHANGED
@@ -21,10 +21,6 @@ import os
 
 import datasets
 
-_URL = "dataset_coco.json"
-_URLS = {
-    "train": _URL
-}
 
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -52,9 +48,9 @@ _LICENSE = ""
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URL = "dataset_coco.json"
 _URLS = {
-    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+    "train": _URL
 }
 
 
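Note on the hunk above: `_URLS` now maps a single "train" key to the local dataset_coco.json. Inside `_split_generators`, `dl_manager.download_and_extract` accepts such a nested dict and hands back the same structure with each entry replaced by a local cached path; a minimal sketch (the cache path shown is illustrative):

    downloaded_files = dl_manager.download_and_extract(_URLS)
    # downloaded_files == {"train": "/path/to/cache/dataset_coco.json"}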
@@ -76,15 +72,15 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+        datasets.BuilderConfig(name="train", version=VERSION, description="This part of my dataset covers a first domain"),
         datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
     ]
 
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+    DEFAULT_CONFIG_NAME = "train"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains the information and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+        if self.config.name == "train":  # This is the name of the configuration selected in BUILDER_CONFIGS above
             features = datasets.Features(
                 {
                     "images": datasets.Sequence(
@@ -148,113 +144,35 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        # _URLS is a module-level dict, so it is referenced directly (not via self)
+        downloaded_files = dl_manager.download_and_extract(_URLS)
+
         return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
-                    "split": "dev",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.jsonl"),
-                    "split": "test"
-                },
-            ),
+            # dataset_coco.json bundles all splits in one file (each image carries
+            # its own "split" label), and _URLS defines only a "train" entry
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
-                    # Yields examples as (key, example) tuples
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
-
-
-class SuperGlueConfig(datasets.BuilderConfig):
-    """BuilderConfig for SuperGLUE."""
-
-    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
-        """BuilderConfig for SuperGLUE.
-
-        Args:
-          features: *list[string]*, list of the features that will appear in the
-            feature dict. Should not include "label".
-          data_url: *string*, url to download the zip file from.
-          citation: *string*, citation for the data set.
-          url: *string*, url for information about the data set.
-          label_classes: *list[string]*, the list of classes for the label if the
-            label is present as a string. Non-string labels will be cast to either
-            'False' or 'True'.
-          **kwargs: keyword arguments forwarded to super.
-        """
-        # Version history:
-        # 1.0.2: Fixed nondeterminism in ReCoRD.
-        # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
-        #        the full release (v2.0).
-        # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
-        # 0.0.2: Initial version.
-        super(SuperGlueConfig, self).__init__(version=datasets.Version("1.0.2"), **kwargs)
-        self.features = features
-        self.label_classes = label_classes
-        self.data_url = data_url
-        self.citation = citation
-        self.url = url
-
-
-class SuperGlue(datasets.GeneratorBasedBuilder):
-    """The SuperGLUE benchmark."""
-
-    BUILDER_CONFIGS = [
-        SuperGlueConfig(
-            name="images",
-            description=_BOOLQ_DESCRIPTION,
-            features=["filepath", "sentids", "filename", "imgid", "split", "sentences", "cocoid"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
-            citation=_BOOLQ_CITATION,
-            url="https://github.com/google-research-datasets/boolean-questions",
-        ),
-        ...
-        ...
-        SuperGlueConfig(
-            name="datasets",
-            description=_AXG_DESCRIPTION,
-            features=[],
-            label_classes=["entailment", "not_entailment"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
-            citation=_AXG_CITATION,
-            url="https://github.com/rudinger/winogender-schemas",
-        ),
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        # assumes `logger = datasets.logging.get_logger(__name__)` at module level
+        logger.info("generating examples from = %s", filepath)
+        key = 0  # running index; keys must be unique across all yielded examples
+        with open(filepath, encoding="utf-8") as f:
+            data = json.load(f)  # one Karpathy-style JSON object, not JSON Lines
+            for image in data["images"]:
+                filepath = image["filepath"]
+                filename = image["filename"]
+                split = image["split"]
+                for sentence in image["sentences"]:
+                    raw = sentence["raw"]
+                    # one example per caption
+                    yield key, {
+                        "filepath": filepath,
+                        "filename": filename,
+                        "split": split,
+                        "raw": raw,
+                    }
+                    key += 1
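The new methods reference a few module-level names that no hunk in this diff defines; a minimal sketch of the imports and helpers they presuppose (assumed for illustration, not part of this commit):

    import json
    from typing import List

    import datasets

    # `logger` as used in _generate_examples
    logger = datasets.logging.get_logger(__name__)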
 
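For reference, `_generate_examples` assumes a Karpathy-style dataset_coco.json shaped like the following Python literal (values are hypothetical; only the keys the code reads are shown):

    sample = {
        "images": [
            {
                "filepath": "val2014",
                "filename": "COCO_val2014_000000391895.jpg",
                "split": "train",
                "sentences": [
                    {"raw": "A man riding a motorcycle down a dirt road."},
                ],
            },
        ],
    }

Once the script and JSON sit side by side, loading is the usual call (a sketch; "./coco.py" stands in for wherever the script actually lives, and local-script loading depends on the installed datasets version):

    import datasets

    # "train" is DEFAULT_CONFIG_NAME, so naming the config explicitly is optional
    ds = datasets.load_dataset("./coco.py", "train")
    print(ds["train"][0])  # first example yielded by _generate_examples

Each caption in "sentences" yields one example, which is why the example key advances per caption rather than per image.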