Hadassah committed
Commit dbec14e · 1 Parent(s): f68ba53

Create food.py

Files changed (1)
  1. food.py +123 -0
food.py ADDED
@@ -0,0 +1,123 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Food dataset."""
+
+
+ import collections
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\none"""
+
+ _DESCRIPTION = """\
+ A simple food dataset for personal study use. Structure follows the CPPE-5 dataset.
+ """
+
+ _HOMEPAGE = ""
+
+ _LICENSE = "Unknown"
+
+ _URL = "https://drive.google.com/uc?id=1fXfOU8EyGn0oiZFclM-fe8FoCigDL41l"
+
+ _CATEGORIES = ["Broccoli", "Tomato", "Potato"]
+
+
+ class Food(datasets.GeneratorBasedBuilder):
+     """Food Dataset"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "image_id": datasets.Value("int64"),
+                 "image": datasets.Image(),
+                 "width": datasets.Value("int32"),
+                 "height": datasets.Value("int32"),
+                 "objects": datasets.Sequence(
+                     {
+                         "id": datasets.Value("int64"),
+                         "area": datasets.Value("int64"),
+                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                         "category": datasets.ClassLabel(names=_CATEGORIES),
+                     }
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         archive = dl_manager.download(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "annotation_file_path": "annotations/train.json",
+                     "files": dl_manager.iter_archive(archive),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "annotation_file_path": "annotations/test.json",
+                     "files": dl_manager.iter_archive(archive),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, annotation_file_path, files):
+         def process_annot(annot, category_id_to_category):
+             return {
+                 "id": annot["id"],
+                 "area": annot["area"],
+                 "bbox": annot["bbox"],
+                 "category": category_id_to_category[annot["category_id"]],
+             }
+
+         image_id_to_image = {}
+         idx = 0
+         # This loop relies on the ordering of the files in the archive:
+         # Annotation files come first, then the images.
+         for path, f in files:
+             file_name = os.path.basename(path)
+             if path == annotation_file_path:
+                 annotations = json.load(f)
+                 category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
+                 image_id_to_annotations = collections.defaultdict(list)
+                 for annot in annotations["annotations"]:
+                     image_id_to_annotations[annot["image_id"]].append(annot)
+                 image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
+             elif file_name in image_id_to_image:
+                 image = image_id_to_image[file_name]
+                 objects = [
+                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
+                 ]
+                 yield idx, {
+                     "image_id": image["id"],
+                     "image": {"path": path, "bytes": f.read()},
+                     "width": image["width"],
+                     "height": image["height"],
+                     "objects": objects,
+                 }
+                 idx += 1
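
For context, a minimal usage sketch (not part of the commit): it assumes a 2021-era version of the datasets library that can load a local dataset script by path, that the file above is saved as food.py, and that the Google Drive archive behind _URL is still reachable.

import datasets

# Load through the script added in this commit (hypothetical local path).
ds = datasets.load_dataset("food.py", split="train")  # the script also defines a "test" split

example = ds[0]
print(example["image_id"], example["width"], example["height"])
# "objects" is a datasets.Sequence over a dict of features, so it decodes to a
# dict of lists: per-object ids, areas, bounding boxes (COCO-style
# [x, y, width, height] in the CPPE-5 layout this script follows), and integer
# labels indexing _CATEGORIES.
print(example["objects"]["bbox"])
print(example["objects"]["category"])

Because the objects feature is a Sequence wrapping a dict, each example exposes its annotations column-wise (one list per field) rather than as a list of per-object dicts.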