Update app.py
app.py CHANGED
@@ -8,20 +8,23 @@ from surya.model.recognition.model import load_model as load_rec_model
 from surya.model.recognition.processor import load_processor as load_rec_processor
 from surya.postprocessing.heatmap import draw_polys_on_image
 
-#
+# Load models and processors
 det_model, det_processor = load_det_model(), load_det_processor()
 rec_model, rec_processor = load_rec_model(), load_rec_processor()
 
-#
+# Assuming languages.json maps language codes to names, but we'll use codes directly for dropdown
 with open("languages.json", "r") as file:
     languages = json.load(file)
-language_options =
+language_options = list(languages.keys())  # Use codes directly
 
 def ocr_function(img, lang_code):
-
-    predictions
-
-
+    predictions = run_ocr([img], [lang_code], det_model, det_processor, rec_model, rec_processor)
+    # Assuming predictions is a list of dictionaries, one per image
+    if predictions:
+        img_with_text = draw_polys_on_image(predictions[0]["polys"], img)
+        return img_with_text, predictions[0]
+    else:
+        return img, {"error": "No text detected"}
 
 def text_line_detection_function(img):
     preds = batch_inference([img], det_model, det_processor)[0]
@@ -32,15 +35,16 @@ with gr.Blocks() as app:
     gr.Markdown("# Surya OCR e Detecção de Linhas de Texto")
     with gr.Tab("OCR"):
         with gr.Column():
-            ocr_input_image = gr.Image(label="
-            ocr_language_selector = gr.Dropdown(label="
-            ocr_run_button = gr.Button("
+            ocr_input_image = gr.Image(label="Input Image for OCR", type="pil")
+            ocr_language_selector = gr.Dropdown(label="Select Language for OCR", choices=language_options, value="en")
+            ocr_run_button = gr.Button("Run OCR")
         with gr.Column():
-            ocr_output_image = gr.Image(label="
-            ocr_text_output = gr.TextArea(label="
+            ocr_output_image = gr.Image(label="OCR Output Image", type="pil", interactive=False)
+            ocr_text_output = gr.TextArea(label="Recognized Text")
 
     ocr_run_button.click(fn=ocr_function, inputs=[ocr_input_image, ocr_language_selector], outputs=[ocr_output_image, ocr_text_output])
 
+
     with gr.Tab("Detecção de Linhas de Texto"):
        with gr.Column():
            detection_input_image = gr.Image(label="Imagem de Entrada para Detecção", type="pil")
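As a quick sanity check of the updated wiring, the snippet below exercises the new ocr_function outside Gradio. It is a minimal sketch, assuming the definitions from app.py above are already in scope, that languages.json maps Surya language codes to display names (as the new comment suggests), and that "sample.png" is a hypothetical test image:

from PIL import Image

# sample.png is a hypothetical test image; gr.Image(type="pil") hands
# ocr_function a PIL image just like this one.
img = Image.open("sample.png")

# "en" should be one of the keys of languages.json, since language_options is
# built from those keys and fed to the dropdown as choices (default value="en").
annotated, prediction = ocr_function(img, "en")
print(prediction)  # per-image prediction on success, or {"error": "No text detected"}

The annotated image and the prediction are exactly what the two output components (ocr_output_image and ocr_text_output) receive when the "Run OCR" button is clicked.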