MrPotato committed on
Commit 1ca7d39 · 1 Parent(s): 5ebabf8

initial commit

Files changed (2)
  1. README.md +0 -0
  2. ref_seg_ger.py +256 -0
README.md ADDED
(empty file)
ref_seg_ger.py ADDED
@@ -0,0 +1,256 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+ import csv
+ import os
+ import numpy as np
+ from PIL import Image
+ from transformers import AutoTokenizer
+ import datasets
+ from itertools import chain
+ import pandas as pd
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ # Keyed by split name so `download_and_extract` returns a dict that can be
+ # indexed as data_dir["train"] / data_dir["test"] in `_split_generators`.
+ _URLS = {
+     "train": "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_train.zip",
+     "test": "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_test.zip",
+ }
+
+ _LABELS = [
+     'publisher', 'source', 'url', 'other', 'author', 'editor', 'lpage',
+     'volume', 'year', 'issue', 'title', 'fpage', 'identifier'
+ ]
+
+ _FEATURES = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "input_ids": datasets.Sequence(datasets.Value("int64")),
+         # "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
+         # "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
+         # "fonts": datasets.Sequence(datasets.Value("string")),
+         # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
+         # "original_image": datasets.features.Image(),
+         # B- and I- variants of every label class plus the O tag; 'O' must be
+         # wrapped in a list, since list + str would raise a TypeError.
+         "labels": datasets.Sequence(datasets.features.ClassLabel(
+             names=list(chain.from_iterable([['B-' + x, 'I-' + x] for x in _LABELS])) + ['O']
+         ))
+         # These are the features of your dataset like images, labels ...
+     }
+ )
+
+
+ def load_image(image_path, size=None):
+     image = Image.open(image_path).convert("RGB")
+     w, h = image.size
+     if size is not None:
+         # resize image
+         image = image.resize((size, size))
+     image = np.asarray(image)
+     image = image[:, :, ::-1]  # flip color channels from RGB to BGR
+     image = image.transpose(2, 0, 1)  # move channels to first dimension
+     return image, (w, h)
+
+
+ # def normalize_bbox(bbox, size):
+ #     return [
+ #         int(1000 * int(bbox[0]) / size[0]),
+ #         int(1000 * int(bbox[1]) / size[1]),
+ #         int(1000 * int(bbox[2]) / size[0]),
+ #         int(1000 * int(bbox[3]) / size[1]),
+ #     ]
+ #
+ #
+ # def simplify_bbox(bbox):
+ #     return [
+ #         min(bbox[0::2]),
+ #         min(bbox[1::2]),
+ #         max(bbox[2::2]),
+ #         max(bbox[3::2]),
+ #     ]
+ #
+ #
+ # def merge_bbox(bbox_list):
+ #     x0, y0, x1, y1 = list(zip(*bbox_list))
+ #     return [min(x0), min(y0), max(x1), max(y1)]
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class RefSeg(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     CHUNK_SIZE = 512
+     VERSION = datasets.Version("1.0.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     # BUILDER_CONFIGS = [
+     #     datasets.BuilderConfig(name="sample", version=VERSION,
+     #                            description="This part of my dataset covers a first domain"),
+     #     datasets.BuilderConfig(name="full", version=VERSION,
+     #                            description="This part of my dataset covers a second domain"),
+     # ]
+
+     # DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     # Note: instantiated at class-definition time, so importing this script downloads the tokenizer.
+     TOKENIZER = AutoTokenizer.from_pretrained("xlm-roberta-base")
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=_FEATURES,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         data_dir = dl_manager.download_and_extract(_URLS)
+         # with open(os.path.join(data_dir, "train.csv")) as f:
+         #     files_train = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
+         #                     'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
+         #                    csv.DictReader(f, skipinitialspace=True)]
+         # with open(os.path.join(data_dir, "test.csv")) as f:
+         #     files_test = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
+         #                    'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
+         #                   csv.DictReader(f, skipinitialspace=True)]
+         # with open(os.path.join(data_dir, "validation.csv")) as f:
+         #     files_validation = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
+         #                          'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
+         #                         csv.DictReader(f, skipinitialspace=True)]
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         # print(filepath)
+         # `filepath` is the directory the archive was extracted to. Walk it and
+         # collect the annotation CSVs (one file per document is assumed here,
+         # since the archive layout is not documented).
+         csv_files = []
+         for root, _, files in os.walk(filepath):
+             for name in sorted(files):
+                 if name.endswith('.csv'):
+                     csv_files.append(os.path.join(root, name))
+         key = 0
+         for f in csv_files:
+             df = pd.read_csv(f)
+             input_ids = []
+             labels = []
+             for _, row in df.iterrows():
+                 tokenized_input = self.TOKENIZER(
+                     row['token'],
+                     add_special_tokens=False,
+                     return_offsets_mapping=False,
+                     return_attention_mask=False,
+                 )
+                 if len(tokenized_input['input_ids']) > 1:
+                     # Word split into several subwords: only the first subword
+                     # keeps the B- tag, the remainder are tagged I-.
+                     if row['tag'] == 'B':
+                         input_ids.append(tokenized_input['input_ids'][0])
+                         labels.append(row['tag'] + '-' + row['label'])
+                         for input_id in tokenized_input['input_ids'][1:]:
+                             input_ids.append(input_id)
+                             labels.append('I-' + row['label'])
+                     elif row['tag'] == 'I':
+                         for input_id in tokenized_input['input_ids']:
+                             input_ids.append(input_id)
+                             labels.append('I-' + row['label'])
+                     else:
+                         for input_id in tokenized_input['input_ids']:
+                             input_ids.append(input_id)
+                             labels.append('O')
+                 else:
+                     # Single subword: append the id itself rather than the
+                     # one-element list returned by the tokenizer.
+                     input_ids.append(tokenized_input['input_ids'][0])
+                     if row['tag'] == 'O':
+                         labels.append(row['tag'])
+                     else:
+                         labels.append(row['tag'] + '-' + row['label'])
+
+             # Emit the document in fixed-size chunks so every example stays
+             # within the model's maximum sequence length.
+             for chunk_id, index in enumerate(range(0, len(input_ids), self.CHUNK_SIZE)):
+                 split_ids = input_ids[index:index + self.CHUNK_SIZE]
+                 # split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
+                 # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
+                 # split_fonts = fonts[index:index + self.CHUNK_SIZE]
+                 split_labels = labels[index:index + self.CHUNK_SIZE]
+
+                 yield key, {
+                     "id": f"{os.path.basename(f)}_{chunk_id}",
+                     "input_ids": split_ids,
+                     # "bbox": split_bboxes,
+                     # "RGBs": split_rgbs,
+                     # "fonts": split_fonts,
+                     # "image": image,
+                     # "original_image": original_image,
+                     "labels": split_labels,
+                 }
+                 key += 1
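
For context, a loading script like this is consumed through `datasets.load_dataset`. A minimal usage sketch, assuming the script is saved locally as `ref_seg_ger.py` and the download URLs are reachable (`trust_remote_code=True` is only needed on newer versions of `datasets` that gate script-based datasets):

from datasets import load_dataset

# Builds both splits: downloads the two zips, extracts them, and runs
# _generate_examples over the annotation CSVs.
ds = load_dataset("./ref_seg_ger.py", trust_remote_code=True)

example = ds["train"][0]
print(example["id"])              # "<csv name>_<chunk id>"
print(len(example["input_ids"]))  # at most CHUNK_SIZE (512)

# "labels" are ClassLabel integer ids; map them back to tag strings.
label_feature = ds["train"].features["labels"].feature
print([label_feature.int2str(l) for l in example["labels"][:5]])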