Spaces:
Build error
Build error
diegulio
committed on
Commit
·
e4e17fb
1
Parent(s):
c5d2e12
multiprocess and examples
Browse files
- cedula/app.py +16 -2
- examples/.DS_Store +0 -0
- examples/cedula1.jpg +0 -0
- examples/license1.jpg +0 -0
- license/app.py +10 -2
cedula/app.py
CHANGED
@@ -7,6 +7,7 @@ from transformers import DonutProcessor, VisionEncoderDecoderModel
|
|
7 |
import torch
|
8 |
from PIL import Image
|
9 |
from pathlib import Path
|
|
|
10 |
|
11 |
from models.experimental import attempt_load
|
12 |
from utils.datasets import LoadImage
|
@@ -17,6 +18,9 @@ import cv2
|
|
17 |
|
18 |
key = str(os.environ.get('key'))
|
19 |
|
|
|
|
|
|
|
20 |
def check_image(image):
|
21 |
try:
|
22 |
images = convert_from_path(Path(image.name), fmt="jpeg", size=(960,1280))
|
@@ -94,8 +98,8 @@ def crop(files = '', #files
|
|
94 |
def get_attributes(input_img):
|
95 |
#access_token = str(os.environ.get('key'))
|
96 |
access_token = key
|
97 |
-
processor = DonutProcessor.from_pretrained("ClipAI/
|
98 |
-
model = VisionEncoderDecoderModel.from_pretrained("ClipAI/
|
99 |
|
100 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
101 |
|
@@ -143,6 +147,16 @@ def get_attributes(input_img):
|
|
143 |
#demo.launch()
|
144 |
|
145 |
def create_model():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
146 |
with gr.Blocks() as demo:
|
147 |
with gr.Row():
|
148 |
with gr.Column():
|
|
|
7 |
import torch
|
8 |
from PIL import Image
|
9 |
from pathlib import Path
|
10 |
+
import multiprocessing
|
11 |
|
12 |
from models.experimental import attempt_load
|
13 |
from utils.datasets import LoadImage
|
|
|
18 |
|
19 |
key = str(os.environ.get('key'))
|
20 |
|
21 |
+
desired_num_threads = multiprocessing.cpu_count()
|
22 |
+
torch.set_num_threads(desired_num_threads)
|
23 |
+
|
24 |
def check_image(image):
|
25 |
try:
|
26 |
images = convert_from_path(Path(image.name), fmt="jpeg", size=(960,1280))
|
|
|
98 |
def get_attributes(input_img):
|
99 |
#access_token = str(os.environ.get('key'))
|
100 |
access_token = key
|
101 |
+
processor = DonutProcessor.from_pretrained("ClipAI/cedula-demo", use_auth_token=access_token)
|
102 |
+
model = VisionEncoderDecoderModel.from_pretrained("ClipAI/cedula-demo", use_auth_token=access_token)
|
103 |
|
104 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
105 |
|
|
|
147 |
#demo.launch()
|
148 |
|
149 |
def create_model():
|
150 |
+
demo = gr.Interface(get_attributes,
|
151 |
+
"image",
|
152 |
+
"json",
|
153 |
+
examples=[["examples/cedula1.jpg"]]
|
154 |
+
)
|
155 |
+
return demo
|
156 |
+
|
157 |
+
|
158 |
+
|
159 |
+
def create_model2():
|
160 |
with gr.Blocks() as demo:
|
161 |
with gr.Row():
|
162 |
with gr.Column():
|
examples/.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
examples/cedula1.jpg
ADDED
![]() |
examples/license1.jpg
ADDED
![]() |
license/app.py
CHANGED
@@ -7,6 +7,7 @@ from transformers import DonutProcessor, VisionEncoderDecoderModel
|
|
7 |
import torch
|
8 |
from PIL import Image
|
9 |
from pathlib import Path
|
|
|
10 |
|
11 |
from models.experimental import attempt_load
|
12 |
from utils.datasets import LoadImage
|
@@ -17,6 +18,9 @@ import cv2
|
|
17 |
|
18 |
key = str(os.environ.get('key'))
|
19 |
|
|
|
|
|
|
|
20 |
def check_image(image):
|
21 |
try:
|
22 |
images = convert_from_path(Path(image.name), fmt="jpeg", size=(960,1280))
|
@@ -141,10 +145,14 @@ def get_attributes(input_img):
|
|
141 |
#demo.launch()
|
142 |
|
143 |
def create_model():
|
144 |
-
demo = gr.Interface(get_attributes,
|
|
|
|
|
|
|
|
|
145 |
return demo
|
146 |
|
147 |
-
|
148 |
if __name__ == '__main__':
|
149 |
demo = create_model()
|
150 |
demo.launch()
|
|
|
7 |
import torch
|
8 |
from PIL import Image
|
9 |
from pathlib import Path
|
10 |
+
import multiprocessing
|
11 |
|
12 |
from models.experimental import attempt_load
|
13 |
from utils.datasets import LoadImage
|
|
|
18 |
|
19 |
key = str(os.environ.get('key'))
|
20 |
|
21 |
+
desired_num_threads = multiprocessing.cpu_count()
|
22 |
+
torch.set_num_threads(desired_num_threads)
|
23 |
+
|
24 |
def check_image(image):
|
25 |
try:
|
26 |
images = convert_from_path(Path(image.name), fmt="jpeg", size=(960,1280))
|
|
|
145 |
#demo.launch()
|
146 |
|
147 |
def create_model():
|
148 |
+
demo = gr.Interface(get_attributes,
|
149 |
+
"image",
|
150 |
+
"json",
|
151 |
+
examples=[["examples/license1.jpg"]]
|
152 |
+
)
|
153 |
return demo
|
154 |
|
155 |
+
["examples/licencia.jpg"]
|
156 |
if __name__ == '__main__':
|
157 |
demo = create_model()
|
158 |
demo.launch()
|