maveriq committed
Commit 3804a2a · 1 Parent(s): 0044fc2

Create readingbank.py

Files changed (1)
  1. readingbank.py +149 -0
readingbank.py ADDED
@@ -0,0 +1,149 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents. It contains 500K document images covering a wide range of document types, together with the corresponding reading order information."""
+
+
+ from pathlib import Path
+ import pandas as pd
+ import datasets
+
+
+ _CITATION = """\
+ @misc{wang2021layoutreader,
+     title={LayoutReader: Pre-training of Text and Layout for Reading Order Detection},
+     author={Zilong Wang and Yiheng Xu and Lei Cui and Jingbo Shang and Furu Wei},
+     year={2021},
+     eprint={2108.11591},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents. It contains 500K document images covering a wide range of document types, together with the corresponding reading order information.
+ """
+
+ _HOMEPAGE = "https://github.com/doc-analysis/ReadingBank"
+
+ # TODO: Add the license for the dataset here if you can find it
+ _LICENSE = ""
+
+ _URLS = {
+     "dataset": "https://layoutlm.blob.core.windows.net/readingbank/dataset/ReadingBank.zip",
+ }
+ def parse_files(files):
+     """Group the extracted shard files by shard name into
+     {shard: {'text': path, 'layout': path}} pairs."""
+     layout_text = {}
+
+     # Shards are named m1-m4, m6 and m7; note that m5 is deliberately skipped.
+     for i in [1, 2, 3, 4, 6, 7]:
+         layout_text[f'm{i}'] = {}
+
+     for file in files:
+         stem = file.stem
+         shard = stem.split('-')[-1]
+         if 'text' in stem:
+             layout_text[shard]['text'] = file
+         elif 'layout' in stem:
+             layout_text[shard]['layout'] = file
+
+     return layout_text
+
+ def get_dataframe(files, kind):
+     """Concatenate the 'text' or 'layout' JSON-lines files of all shards
+     into a single dataframe."""
+     df_list = []
+     for shard in files:
+         df_list.append(pd.read_json(files[shard][kind], lines=True))
+     df = pd.concat(df_list)
+     df.reset_index(inplace=True, drop=True)
+     return df
+
+ class ReadingBank(datasets.GeneratorBasedBuilder):
+     """ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents. It contains 500K document images covering a wide range of document types, together with the corresponding reading order information."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "src": datasets.Value("string"),
+                 "tgt": datasets.Value("string"),
+                 "bleu": datasets.Value("float"),
+                 "tgt_index": datasets.Sequence(datasets.Value("int16")),
+                 "original_filename": datasets.Value("string"),
+                 "filename": datasets.Value("string"),
+                 "page_idx": datasets.Value("int16"),
+                 "src_layout": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
+                 "tgt_layout": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This description appears on the dataset page.
+             description=_DESCRIPTION,
+             # Defines the columns of the dataset and their types.
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS["dataset"]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs are passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": parse_files(list(Path(f'{data_dir}/train/').glob('*'))),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": parse_files(list(Path(f'{data_dir}/dev/').glob('*'))),
+                     "split": "dev",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": parse_files(list(Path(f'{data_dir}/test/').glob('*'))),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         print('\nCreating dataframes.. please wait..')
+         text_df = get_dataframe(filepath, 'text')
+         layout_df = get_dataframe(filepath, 'layout')
+         layout_df.rename(columns={'src': 'src_layout', 'tgt': 'tgt_layout'}, inplace=True)
+
+         # The text and layout shards are line-aligned, so a positional
+         # (index-on-index) merge pairs each text record with its layout.
+         df = text_df.merge(layout_df, left_index=True, right_index=True)
+         print('Dataframes created..\n')
+         yield from enumerate(df.to_dict(orient='records'))