# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents, which contains 500K document images with a wide range of document types as well as the corresponding reading order information."""
from pathlib import Path
import pandas as pd
import datasets
_CITATION = """\
@misc{wang2021layoutreader,
title={LayoutReader: Pre-training of Text and Layout for Reading Order Detection},
author={Zilong Wang and Yiheng Xu and Lei Cui and Jingbo Shang and Furu Wei},
year={2021},
eprint={2108.11591},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents, which contains 500K document images with a wide range of document types as well as the corresponding reading order information.
"""
_HOMEPAGE = "https://github.com/doc-analysis/ReadingBank"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_URLS = {
"dataset": "https://layoutlm.blob.core.windows.net/readingbank/dataset/ReadingBank.zip",
}
def parse_files(files):
    """Group the extracted files by shard, keeping the text and layout file for each shard.

    File stems are expected to contain 'text' or 'layout' and to end with the shard id
    (e.g. '...-m1'), which is how the shard key is recovered below.
    """
    layout_text = {}
    # Shard identifiers handled by this script (note that there is no m5 shard).
    for i in [1, 2, 3, 4, 6, 7]:
        layout_text[f'm{i}'] = {}
    for file in files:
        stem = file.stem
        shard = stem.split('-')[-1]
        if 'text' in stem:
            layout_text[shard]['text'] = file
        elif 'layout' in stem:
            layout_text[shard]['layout'] = file
    return layout_text
def get_dataframe(files, kind):
    """Concatenate the JSON-lines files of one kind ('text' or 'layout') across all shards."""
    df_list = []
    for shard in files.keys():
        df_list.append(pd.read_json(files[shard][kind], lines=True))
    df = pd.concat(df_list)
    df.reset_index(inplace=True, drop=True)
    return df
class ReadingBank(datasets.GeneratorBasedBuilder):
"""ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents, which contains 500K document images with a wide range of document types as well as the corresponding reading order information."""
VERSION = datasets.Version("1.1.0")
def _info(self):
features = datasets.Features(
{
"src": datasets.Value("string"),
"tgt": datasets.Value("string"),
"bleu": datasets.Value("float"),
"tgt_index": datasets.Sequence(datasets.Value("int16")),
"original_filename": datasets.Value("string"),
"filename": datasets.Value("string"),
"page_idx": datasets.Value("int16"),
"src_layout": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
"tgt_layout": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = _URLS["dataset"]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": parse_files(list(Path(f'{data_dir}/train/').glob('*'))),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": parse_files(list(Path(f'{data_dir}/dev/').glob('*'))),
"split": "dev",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": parse_files(list(Path(f'{data_dir}/test/').glob('*'))),
"split": "test"
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        print('\nCreating dataframes.. please wait..')
        # Load the text and layout records; rows across the two sets of files are aligned by position.
        text_df = get_dataframe(filepath, 'text')
        layout_df = get_dataframe(filepath, 'layout')
        # Rename the layout columns so they do not collide with the text columns when merging.
        layout_df.rename(columns={'src': 'src_layout', 'tgt': 'tgt_layout'}, inplace=True)
        df = text_df.merge(layout_df, left_index=True, right_index=True)
        print('Dataframes created..\n')
        # Use the row position as the example key.
        yield from enumerate(df.to_dict(orient='records'))
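

# Minimal usage sketch (illustration only, not part of the loading logic): assuming this script is
# saved locally as `readingbank.py`, the dataset can be built with `datasets.load_dataset`. Depending
# on the installed `datasets` version, `trust_remote_code=True` may be required for script-based
# datasets, and the full ReadingBank archive is downloaded and extracted before examples are generated.
if __name__ == "__main__":
    dataset = datasets.load_dataset("readingbank.py", split="validation")
    example = dataset[0]
    print(example["src"][:80])        # first characters of the source word sequence
    print(example["src_layout"][:3])  # first few layout entries (word bounding boxes)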