"""ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents, which contains 500K document images with a wide range of document types as well as the corresponding reading order information.""" |

from pathlib import Path

import datasets
import pandas as pd

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@misc{wang2021layoutreader,
      title={LayoutReader: Pre-training of Text and Layout for Reading Order Detection},
      author={Zilong Wang and Yiheng Xu and Lei Cui and Jingbo Shang and Furu Wei},
      year={2021},
      eprint={2108.11591},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents, which contains 500K document images with a wide range of document types as well as the corresponding reading order information.
"""

_HOMEPAGE = "https://github.com/doc-analysis/ReadingBank"

_LICENSE = ""

_URLS = {
    "dataset": "https://layoutlm.blob.core.windows.net/readingbank/dataset/ReadingBank.zip",
}
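
# Expected layout of the extracted archive, inferred from the code below:
# train/, dev/ and test/ directories, each holding paired JSON-lines shard
# files whose stems look like 'text-m1' and 'layout-m1'.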


def parse_files(files):
    """Group a split directory's shard files into text/layout pairs keyed by shard suffix."""
    layout_text = {}

    # Shard suffixes expected in the extracted archive; note that m5 is absent.
    for i in [1, 2, 3, 4, 6, 7]:
        layout_text[f'm{i}'] = {}

    for file in files:
        stem = file.stem  # e.g. 'text-m1' or 'layout-m1'
        shard = stem.split('-')[-1]  # -> 'm1'
        if 'text' in stem:
            layout_text[shard]['text'] = file
        elif 'layout' in stem:
            layout_text[shard]['layout'] = file

    return layout_text
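
# For illustration (hypothetical file names): given
#     [Path('train/text-m1.json'), Path('train/layout-m1.json')]
# parse_files returns a mapping like
#     {'m1': {'text': Path('train/text-m1.json'),
#             'layout': Path('train/layout-m1.json')},
#      'm2': {}, ...}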


def get_dataframe(files, kind):
    """Concatenate every shard of one kind ('text' or 'layout') into a single DataFrame."""
    df_list = []
    for shard in files:
        df_list.append(pd.read_json(files[shard][kind], lines=True))
    df = pd.concat(df_list)
    df.reset_index(inplace=True, drop=True)
    return df
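
# For illustration: with shards m1 and m2 present, get_dataframe(files, 'text')
# stacks text-m1 and text-m2 row-wise and re-indexes from 0. The loader relies
# on the text and layout shards being row-aligned so the two frames can later
# be joined on their shared row index.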


class ReadingBank(datasets.GeneratorBasedBuilder):
    """ReadingBank is a benchmark dataset for reading order detection built with weak supervision from WORD documents, which contains 500K document images with a wide range of document types as well as the corresponding reading order information."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        features = datasets.Features(
            {
                "src": datasets.Value("string"),
                "tgt": datasets.Value("string"),
                "bleu": datasets.Value("float"),
                "tgt_index": datasets.Sequence(datasets.Value("int16")),
                "original_filename": datasets.Value("string"),
                "filename": datasets.Value("string"),
                "page_idx": datasets.Value("int16"),
                "src_layout": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
                "tgt_layout": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
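
    # For orientation (hypothetical values): a generated record looks roughly like
    #     {'src': 'left-to-right text ...', 'tgt': 'reading-order text ...',
    #      'bleu': 0.98, 'tgt_index': [0, 1, 2, ...],
    #      'src_layout': [[x0, y0, x1, y1], ...],  # one box per token
    #      'tgt_layout': [[x0, y0, x1, y1], ...], ...}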

    def _split_generators(self, dl_manager):
        url = _URLS["dataset"]
        data_dir = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": parse_files(list(Path(f'{data_dir}/train/').glob('*'))),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": parse_files(list(Path(f'{data_dir}/dev/').glob('*'))),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": parse_files(list(Path(f'{data_dir}/test/').glob('*'))),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        logger.info('Reading the %s shards into dataframes...', split)
        text_df = get_dataframe(filepath, 'text')
        layout_df = get_dataframe(filepath, 'layout')
        layout_df.rename(columns={'src': 'src_layout', 'tgt': 'tgt_layout'}, inplace=True)

        # The text and layout shards are row-aligned, so merging on the shared
        # row index pairs each text record with its layout counterpart.
        df = text_df.merge(layout_df, left_index=True, right_index=True)
        yield from enumerate(df.to_dict(orient='records'))
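

if __name__ == '__main__':
    # A minimal smoke test, not part of the loading script proper: load the
    # train split through this file and inspect one record. This assumes a
    # `datasets` version that still supports script-based loading; the full
    # archive is downloaded on first run.
    dataset = datasets.load_dataset(__file__, split='train')
    print(dataset[0])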