from inference_sdk import InferenceHTTPClient
from PIL import Image, ImageDraw

from config import Settings

def draw_rectangle(image, x, y, width, height, **kwargs):
    """Cover a detected region with a solid white rectangle.

    The (x, y) coordinates are the centre of the box, as returned by
    Roboflow-style object detection predictions; extra prediction keys
    (confidence, class, ...) are absorbed by **kwargs.
    """
    draw = ImageDraw.Draw(image)

    # Convert the centre-based box to corner coordinates.
    x1 = x - width // 2
    y1 = y - height // 2
    x2 = x1 + width
    y2 = y1 + height

    draw.rectangle(((x1, y1), (x2, y2)), fill=(255, 255, 255))
    return image


def crop_image(image, x, y, width, height, **kwargs):
    """Crop a centre-based bounding box out of the image.

    Returns the crop together with its top-left corner and size so the
    caller can map the crop back onto the original page.
    """
    left = x - width // 2
    top = y - height // 2
    right = left + width
    bottom = top + height

    cropped_image = image.crop((left, top, right, bottom))
    return cropped_image, left, top, (right - left), (bottom - top)


def detect_handwriting(image):
    """Detect handwritten regions, mask them out, and return the crops.

    Returns the masked copy of the page plus a list of
    (crop, left, top, width, height) tuples, one per detection.
    """
    settings = Settings()
    client = InferenceHTTPClient(
        api_url=settings.ROBOFLOW_URL,
        api_key=settings.ROBOFLOW_API_KEY,
    )
    result = client.infer(image, model_id=settings.YOLO_MODEL_ID)

    cpy = image.copy()
    handwritten_parts = []
    # Each prediction is a dict with centre-based box coordinates
    # (x, y, width, height) plus extra keys such as confidence and class,
    # which draw_rectangle and crop_image ignore via **kwargs.
    for prediction in result['predictions']:
        cpy = draw_rectangle(cpy, **prediction)
        # Crop from the original image, not from cpy, so the crop does not
        # come back as the white rectangle that was just drawn over it.
        handwritten_parts.append(crop_image(image, **prediction))
    return cpy, handwritten_parts
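

# Minimal usage sketch, assuming Settings provides ROBOFLOW_URL,
# ROBOFLOW_API_KEY and YOLO_MODEL_ID as used above. The input path and
# output filenames are hypothetical placeholders.
if __name__ == "__main__":
    page = Image.open("sample_page.jpg")  # hypothetical input image
    masked_page, parts = detect_handwriting(page)
    masked_page.save("masked_page.png")
    for i, (crop, left, top, width, height) in enumerate(parts):
        # Save each handwritten crop alongside its location on the page.
        crop.save(f"handwriting_{i}_{left}_{top}.png")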