valerii777 committed
Commit 7f4886d · verified · Parent: 8eb1e20

Initial (no) commit (v0.0.1)

app.py ADDED
@@ -0,0 +1,93 @@
+ from pathlib import Path
+
+ import pandas as pd
+ import uvicorn
+ from fastapi import FastAPI, Query, UploadFile
+ from fastapi.responses import RedirectResponse
+ from rapidocr_onnxruntime import RapidOCR
+
+ app = FastAPI()
+
+ # Detection models shipped under models/text_det/.
+ det_models = [
+     "ch_PP-OCRv4_det_infer.onnx",
+     "ch_PP-OCRv3_det_infer.onnx",
+     "ch_PP-OCRv2_det_infer.onnx",
+     "ch_PP-OCRv4_det_server_infer.onnx",
+     "ch_ppocr_server_v2.0_det_infer.onnx",
+ ]
+
+ # Recognition models shipped under models/text_rec/.
+ rec_models = [
+     "ch_PP-OCRv4_rec_infer.onnx",
+     "ch_PP-OCRv3_rec_infer.onnx",
+     "ch_PP-OCRv2_rec_infer.onnx",
+     "ch_ppocr_server_v2.0_rec_infer.onnx",
+     "en_PP-OCRv3_rec_infer.onnx",
+     "en_number_mobile_v2.0_rec_infer.onnx",
+     "korean_mobile_v2.0_rec_infer.onnx",
+     "japan_rec_crnn_v2.onnx",
+ ]
+
+
+ def get_text(
+     image,
+     text_det=None,
+     text_rec=None,
+     box_thresh=0.5,
+     unclip_ratio=1.6,
+     text_score=0.5,
+ ):
+     det_model_path = str(Path("models") / "text_det" / text_det)
+     rec_model_path = str(Path("models") / "text_rec" / text_rec)
+
+     # v2, Korean and Japanese recognizers expect 32-pixel-high crops; the rest expect 48.
+     if (
+         "v2" in rec_model_path
+         or "korean" in rec_model_path
+         or "japan" in rec_model_path
+     ):
+         rec_image_shape = [3, 32, 320]
+     else:
+         rec_image_shape = [3, 48, 320]
+
+     rapid_ocr = RapidOCR(
+         det_model_path=det_model_path,
+         rec_model_path=rec_model_path,
+         rec_img_shape=rec_image_shape,
+     )
+
+     ocr_result, infer_elapse = rapid_ocr(
+         image, box_thresh=box_thresh, unclip_ratio=unclip_ratio, text_score=text_score
+     )
+     if not ocr_result or not infer_elapse:
+         return None
+
+     dt_boxes, rec_res, scores = list(zip(*ocr_result))
+     return pd.DataFrame(
+         [[rec, score] for rec, score in zip(rec_res, scores)],
+         columns=("Rec", "Score"),
+     )
+
+
+ @app.get("/", include_in_schema=False)
+ def docs_redirect():
+     return RedirectResponse(url="/docs")
+
+
+ @app.post("/ocr")
+ def create_upload_file(
+     file: UploadFile,
+     text_det: str = Query("ch_PP-OCRv4_det_infer.onnx", enum=det_models),
+     text_rec: str = Query("en_number_mobile_v2.0_rec_infer.onnx", enum=rec_models),
+     box_thresh: float = 0.5,
+     unclip_ratio: float = 1.6,
+     text_score: float = 0.5,
+ ):
+     resp = get_text(
+         file.file.read(), text_det, text_rec, box_thresh, unclip_ratio, text_score
+     )
+     if resp is None:
+         # Nothing was detected; return empty columns instead of failing on None.
+         return {"Rec": [], "Score": []}
+     return resp.to_dict("list")
+
+
+ if __name__ == "__main__":
+     uvicorn.run(app, host="0.0.0.0", port=7860)
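
For reference, a minimal client-side sketch for the /ocr endpoint defined above. It assumes the app is already running locally on port 7860, that the requests package is installed, and that sample.png is a placeholder image path; none of this is part of the commit itself.

import requests  # assumed to be installed separately; not listed in requirements.txt

# sample.png is a hypothetical input image.
with open("sample.png", "rb") as f:
    response = requests.post(
        "http://localhost:7860/ocr",
        files={"file": ("sample.png", f, "image/png")},
        params={
            "text_det": "ch_PP-OCRv4_det_infer.onnx",
            "text_rec": "en_number_mobile_v2.0_rec_infer.onnx",
            "box_thresh": 0.5,
            "unclip_ratio": 1.6,
            "text_score": 0.5,
        },
    )
print(response.json())  # {"Rec": [...], "Score": [...]}, or empty lists if nothing was found
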
models/text_det/ch_PP-OCRv2_det_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43d01120eab9f00e8d7541ad0f906dd9c7f28394233e47279073fab73f82da87
+ size 2341867
models/text_det/ch_PP-OCRv3_det_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3439588c030faea393a54515f51e983d8e155b19a2e8aba7891934c1cf0de526
+ size 2432880
models/text_det/ch_PP-OCRv4_det_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2a7720d45a54257208b1e13e36a8479894cb74155a5efe29462512d42f49da9
+ size 4745517
models/text_det/ch_PP-OCRv4_det_server_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfa39a3f298f6d3fc71789834d15da36d11a6c59b489fc16ea4733728012f786
+ size 113352104
models/text_det/ch_ppocr_server_v2.0_det_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a7e74057c1f4e17f38bf3e0a9f500e64e83159e4fd58775f4953f13438370ca
+ size 48866380
models/text_rec/ch_PP-OCRv2_rec_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86eec59d02644a8890d54f4aa5fe7402cf2848450fc7ac613d4677cd6025b3da
+ size 8402009
models/text_rec/ch_PP-OCRv3_rec_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:897a3ededb38fee0dae2c1ccee38241f37df202c9509e3abca02e9217c5ee615
+ size 10690752
models/text_rec/ch_PP-OCRv4_rec_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48fc40f24f6d2a207a2b1091d3437eb3cc3eb6b676dc3ef9c37384005483683b
+ size 10857958
models/text_rec/ch_ppocr_server_v2.0_rec_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d0714a2b44f5d7ac7a17e44d0e1961393447590c895f4197f37e855e3464f53
+ size 111590126
models/text_rec/en_PP-OCRv3_rec_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef7abd8bd3629ae57ea2c28b425c1bd258a871b93fd2fe7c433946ade9b5d9ea
+ size 8967018
models/text_rec/en_number_mobile_v2.0_rec_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e679ba625c544444be78292a50d9e1af9caa1569239a88bb8b864cb688b11c01
+ size 1882607
models/text_rec/japan_rec_crnn_v2.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b0495059f5738166e606d864b04ff00093f67a807efb02cddf472839cae970c
+ size 3571807
models/text_rec/korean_mobile_v2.0_rec_infer.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6558500138b43b46a4941957fb8c918546dae5fb0e71718536f1883acc80faf
+ size 3290650
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ Pillow
+ onnxruntime
+ rapidocr_onnxruntime
+ streamlit_image_select
+ python-multipart
+ fastapi
+ uvicorn
utils.py ADDED
@@ -0,0 +1,78 @@
+ # -*- encoding: utf-8 -*-
+ # @Author: SWHL
+ # @Contact: [email protected]
+ import math
+ import random
+ from pathlib import Path
+
+ import numpy as np
+ from PIL import Image, ImageDraw, ImageFont
+
+
+ def draw_ocr_box_txt(image, boxes, txts, font_path, scores=None, text_score=0.5):
+     """Render a side-by-side view: detected boxes on the left, recognized text on the right."""
+     h, w = image.height, image.width
+     img_left = image.copy()
+     img_right = Image.new("RGB", (w, h), (255, 255, 255))
+
+     random.seed(0)
+     draw_left = ImageDraw.Draw(img_left)
+     draw_right = ImageDraw.Draw(img_right)
+     for idx, (box, txt) in enumerate(zip(boxes, txts)):
+         # Skip low-confidence results.
+         if scores is not None and float(scores[idx]) < text_score:
+             continue
+
+         color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
+
+         box = [tuple(v) for v in box]
+         draw_left.polygon(box, fill=color)
+         draw_right.text([box[3][0], box[3][1]], str(idx), fill=color)
+
+         draw_right.polygon(
+             [
+                 box[0][0],
+                 box[0][1],
+                 box[1][0],
+                 box[1][1],
+                 box[2][0],
+                 box[2][1],
+                 box[3][0],
+                 box[3][1],
+             ],
+             outline=color,
+         )
+
+         box_height = math.sqrt(
+             (box[0][0] - box[3][0]) ** 2 + (box[0][1] - box[3][1]) ** 2
+         )
+         box_width = math.sqrt(
+             (box[0][0] - box[1][0]) ** 2 + (box[0][1] - box[1][1]) ** 2
+         )
+
+         if box_height > 2 * box_width:
+             # Vertical text: draw one character per line.
+             font_size = max(int(box_width * 0.9), 10)
+             font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
+             cur_y = box[0][1]
+             for c in txt:
+                 # getbbox() replaces getsize(), which was removed in Pillow 10.
+                 char_height = font.getbbox(c)[3]
+                 draw_right.text((box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
+                 cur_y += char_height
+         else:
+             # Horizontal text: draw the whole string at once.
+             font_size = max(int(box_height * 0.8), 10)
+             font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
+             draw_right.text([box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
+
+     # Blend the filled boxes with the original image and paste both halves side by side.
+     img_left = Image.blend(image, img_left, 0.5)
+     img_show = Image.new("RGB", (w * 2, h), (255, 255, 255))
+     img_show.paste(img_left, (0, 0, w, h))
+     img_show.paste(img_right, (w, 0, w * 2, h))
+     return np.array(img_show)
+
+
+ def visualize(image, boxes, txts, scores, font_path="./fonts/FZYTK.TTF"):
+     draw_img = draw_ocr_box_txt(image, boxes, txts, font_path, scores, text_score=0.5)
+
+     # Make sure the output directory exists; the visualization itself is returned, not saved here.
+     draw_img_save = Path("./inference_results/")
+     if not draw_img_save.exists():
+         draw_img_save.mkdir(parents=True, exist_ok=True)
+     # RGB -> BGR, convenient for OpenCV-style consumers.
+     return draw_img[:, :, ::-1]
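
Nothing else in this commit imports utils.py yet. As a rough sketch, the helpers above could be wired to RapidOCR output along these lines; sample.png and the font path are hypothetical, and the fonts/ directory is not part of this commit.

import cv2
from PIL import Image
from rapidocr_onnxruntime import RapidOCR

from utils import visualize

engine = RapidOCR()               # default detection/recognition models
result, _ = engine("sample.png")  # sample.png is a hypothetical input image
if result:
    boxes, txts, scores = list(zip(*result))
    vis = visualize(
        Image.open("sample.png").convert("RGB"),
        boxes,
        txts,
        scores,
        font_path="./fonts/FZYTK.TTF",  # hypothetical font path; any TTF covering the script works
    )
    cv2.imwrite("inference_results/vis.png", vis)  # visualize() returns a BGR array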