Spaces:
Build error
Upload 3 files
- app.py +101 -0
- packages.txt +1 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,101 @@
+import os
+os.system('pip install --upgrade transformers')
+# workaround: install an old version of pytorch, since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
+os.system('pip install torch==1.8.1+cpu torchvision==0.9.1+cpu torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html')
+# install the detectron2 wheels that match pytorch 1.8
+os.system('pip install --upgrade detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html')
+
+# install PyTesseract, the Python wrapper used by the processor's built-in OCR
+os.system('pip install -q pytesseract')
+
+import gradio as gr
+import numpy as np
+from transformers import LayoutXLMProcessor, LayoutLMv2ForTokenClassification
+from datasets import load_dataset
+import torch
+from PIL import Image, ImageDraw, ImageFont
+from itertools import chain
+
+processor = LayoutXLMProcessor.from_pretrained("amir22010/layoutxlm-xfund-ja")
+model = LayoutLMv2ForTokenClassification.from_pretrained("amir22010/layoutxlm-xfund-ja", num_labels=7)
+
+# load an example image; the XFUND test sample is immediately replaced by the bundled invoice
+dataset = load_dataset("ranpox/xfund", lang='ja', split="test")
+image = Image.open(dataset[0]["image_path"]).convert("RGB")
+image = Image.open("./invoice.png").convert("RGB")
+image.save("document.png")
+# define id2label, label2color
+labels = [
+    'O',
+    'B-QUESTION',
+    'B-ANSWER',
+    'B-HEADER',
+    'I-ANSWER',
+    'I-QUESTION',
+    'I-HEADER'
+]
+
+id2label = {v: k for v, k in enumerate(labels)}
+label2id = {k: v for v, k in enumerate(labels)}
+
+def unnormalize_box(bbox, width, height):
+    # model boxes are normalized to a 0-1000 grid; scale them back to pixels
+    return [
+        width * (bbox[0] / 1000),
+        height * (bbox[1] / 1000),
+        width * (bbox[2] / 1000),
+        height * (bbox[3] / 1000),
+    ]
+
+def iob_to_label(label):
+    # strip the IOB prefix ('B-'/'I-'); the bare 'O' tag becomes 'other'
+    label = label[2:]
+    if not label:
+        return 'other'
+    return label
+
+label2color = {'question': 'blue', 'answer': 'green', 'header': 'orange', 'other': 'violet'}
+
+def infer(image):
+    # use these instead if you're loading images from a path or from rendered PDF pages
+    #image = Image.open(img_path).convert("RGB")
+    #image = image.convert("RGB")
+    encoding = processor(image, return_offsets_mapping=True, return_tensors="pt", truncation=True, max_length=514)  # 514 = max_position_embeddings of the XLM-R backbone
+    offset_mapping = encoding.pop('offset_mapping')
+    outputs = model(**encoding)
+    predictions = outputs.logits.argmax(-1).squeeze().tolist()
+    token_boxes = encoding.bbox.squeeze().tolist()
+    width, height = image.size
+    # a token whose character offset doesn't start at 0 is a subword continuation
+    is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
+
+    # keep only the first token of each word, with its box scaled back to pixels
+    true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
+    true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]
+    draw = ImageDraw.Draw(image)
+
+    font = ImageFont.load_default()
+
+    for prediction, box in zip(true_predictions, true_boxes):
+        predicted_label = iob_to_label(prediction).lower()
+        draw.rectangle(box, outline=label2color[predicted_label])
+        draw.text((box[0] + 10, box[1] - 10), text=predicted_label, fill=label2color[predicted_label], font=font)
+
+    return image
+
+
+title = "Interactive demo: layoutxlm-ja"
+description = "Demo for LayoutXLM, Microsoft's multilingual Transformer for state-of-the-art document image understanding (see https://huggingface.co/microsoft/layoutxlm-base). This particular checkpoint is fine-tuned on the Japanese portion of XFUND, a dataset of manually annotated forms, and tags the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.08836' target='_blank'>LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
+examples = [['document.png']]
+
+css = (".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
+       " .image-preview {height: auto !important;}")
+
+iface = gr.Interface(fn=infer,
+                     inputs=gr.inputs.Image(type="pil"),
+                     outputs=gr.outputs.Image(type="pil", label="annotated image"),
+                     title=title,
+                     description=description,
+                     article=article,
+                     examples=examples,
+                     css=css,
+                     enable_queue=True)
+iface.launch(debug=True)
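A note on the geometry in app.py: LayoutLM-family processors normalize every word box to a 0-1000 grid regardless of page size, which is why infer has to call unnormalize_box before drawing. A minimal round-trip sketch, where the page size and box values are illustrative assumptions rather than data from this Space:

# Round trip between pixel boxes and the 0-1000 grid used by
# LayoutLM-family models; the values below are illustrative assumptions.
def normalize_box(bbox, width, height):
    return [int(1000 * bbox[0] / width),
            int(1000 * bbox[1] / height),
            int(1000 * bbox[2] / width),
            int(1000 * bbox[3] / height)]

def unnormalize_box(bbox, width, height):
    return [width * (bbox[0] / 1000),
            height * (bbox[1] / 1000),
            width * (bbox[2] / 1000),
            height * (bbox[3] / 1000)]

width, height = 1654, 2339          # roughly an A4 scan at 200 dpi
pixel_box = [165, 234, 827, 468]    # left, top, right, bottom in pixels
norm = normalize_box(pixel_box, width, height)   # -> [99, 100, 500, 200]
back = unnormalize_box(norm, width, height)      # -> approx. [163.7, 233.9, 827.0, 467.8]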
packages.txt
ADDED
@@ -0,0 +1 @@
+tesseract-ocr
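packages.txt is how Spaces installs apt-level dependencies: tesseract-ocr provides the Tesseract binary itself, while the pip install of pytesseract in app.py only adds the Python wrapper that the processor's built-in OCR calls. A quick sanity check, as a sketch:

# Sketch: confirm the apt-installed Tesseract binary from packages.txt
# is on PATH and visible to the pytesseract wrapper.
import shutil
import pytesseract

assert shutil.which("tesseract") is not None, "tesseract-ocr binary not found"
print(pytesseract.get_tesseract_version())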
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+gradio
+Pillow
+numpy
+datasets
+torch
+transformers
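Note that app.py immediately reshapes this environment at runtime: it upgrades transformers and downgrades torch to 1.8.1+cpu so the prebuilt detectron2 CPU wheels can load. As a sketch of an alternative, the same pins could live in requirements.txt itself, avoiding the os.system() installs at startup (versions copied from app.py; the -f wheel-index lines are assumptions that must match the Space's Python version):

gradio
Pillow
numpy
datasets
transformers
-f https://download.pytorch.org/whl/torch_stable.html
torch==1.8.1+cpu
torchvision==0.9.1+cpu
torchaudio==0.8.1
-f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html
detectron2
pytesseract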