import cv2
#from aruco_detector import ArucoDetector
import gradio as gr
import os
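# Names of the predefined ArUco / AprilTag dictionaries. The order matches the
# cv2.aruco.DICT_* enum values, so a name's list index can be passed directly
# to cv2.aruco.getPredefinedDictionary().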
dict_list = ['DICT_4X4_50',
'DICT_4X4_100',
'DICT_4X4_250',
'DICT_4X4_1000',
'DICT_5X5_50',
'DICT_5X5_100',
'DICT_5X5_250',
'DICT_5X5_1000',
'DICT_6X6_50',
'DICT_6X6_100',
'DICT_6X6_250',
'DICT_6X6_1000',
'DICT_7X7_50',
'DICT_7X7_100',
'DICT_7X7_250',
'DICT_7X7_1000',
'DICT_ARUCO_ORIGINAL',
'DICT_APRILTAG_16h5',
'DICT_APRILTAG_25h9',
'DICT_APRILTAG_36h10',
'DICT_APRILTAG_36h11',
'DICT_ARUCO_MIP_36h12']
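# Run ArUco detection on the uploaded image with the parameters chosen in the UI,
# and return the annotated image plus a preview of the adaptive thresholding step.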
def inference(image_path, dict_name, draw_rejects,
              adaptiveThreshWinSizeMin, adaptiveThreshWinSizeMax, adaptiveThreshWinSizeStep, adaptiveThreshConstant,
              minMarkerPerimeterRate, maxMarkerPerimeterRate,
              polygonalApproxAccuracyRate, minCornerDistanceRate, minDistanceToBorder, minMarkerDistanceRate,
              cornerRefinementMethod, cornerRefinementWinSize, cornerRefinementMaxIterations, cornerRefinementMinAccuracy,
              markerBorderBits, perspectiveRemovePixelPerCell, perspectiveRemoveIgnoredMarginPerCell,
              maxErroneousBitsInBorderRate, minOtsuStdDev, errorCorrectionRate):
    # Validate the UI inputs before touching OpenCV.
    if not dict_name:
        raise gr.Error("No dictionary selected. Please select an ArUco dictionary.")
    if not image_path:
        raise gr.Error("No image provided. Please upload an image.")

    # The list index of the dictionary name matches the cv2.aruco enum value.
    dict_index = dict_list.index(dict_name)
    aruco_dict = cv2.aruco.getPredefinedDictionary(dict_index)

    # Copy every slider/radio value into the detector parameters.
    aruco_params = cv2.aruco.DetectorParameters()
    aruco_params.adaptiveThreshWinSizeMin = int(adaptiveThreshWinSizeMin)
    aruco_params.adaptiveThreshWinSizeMax = int(adaptiveThreshWinSizeMax)
    aruco_params.adaptiveThreshWinSizeStep = int(adaptiveThreshWinSizeStep)
    aruco_params.adaptiveThreshConstant = int(adaptiveThreshConstant)
    aruco_params.minMarkerPerimeterRate = minMarkerPerimeterRate
    aruco_params.maxMarkerPerimeterRate = maxMarkerPerimeterRate
    aruco_params.polygonalApproxAccuracyRate = polygonalApproxAccuracyRate
    aruco_params.minCornerDistanceRate = minCornerDistanceRate
    aruco_params.minDistanceToBorder = int(minDistanceToBorder)
    aruco_params.minMarkerDistanceRate = minMarkerDistanceRate
    aruco_params.cornerRefinementMethod = cornerRefinementMethods.index(cornerRefinementMethod)
    aruco_params.cornerRefinementWinSize = int(cornerRefinementWinSize)
    aruco_params.cornerRefinementMaxIterations = int(cornerRefinementMaxIterations)
    aruco_params.cornerRefinementMinAccuracy = cornerRefinementMinAccuracy
    aruco_params.markerBorderBits = int(markerBorderBits)
    aruco_params.perspectiveRemovePixelPerCell = int(perspectiveRemovePixelPerCell)
    aruco_params.perspectiveRemoveIgnoredMarginPerCell = perspectiveRemoveIgnoredMarginPerCell
    aruco_params.maxErroneousBitsInBorderRate = maxErroneousBitsInBorderRate
    aruco_params.minOtsuStdDev = minOtsuStdDev
    aruco_params.errorCorrectionRate = errorCorrectionRate

    detector = cv2.aruco.ArucoDetector(aruco_dict, aruco_params)
    image = cv2.imread(image_path)
    corners, ids, rejectedImgPoints = detector.detectMarkers(image)

    # Preview of the adaptive thresholding step. blockSize must be odd and >= 3,
    # so the minimum window size is forced odd; the slider constant is used as C.
    block_size = max(3, int(adaptiveThreshWinSizeMin) | 1)
    thresh_image = cv2.adaptiveThreshold(
        cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
        block_size, int(adaptiveThreshConstant))

    # Draw accepted markers (green) and, optionally, rejected candidates (red).
    image = cv2.aruco.drawDetectedMarkers(image, corners, ids, borderColor=(0, 255, 0))
    if draw_rejects:
        image = cv2.aruco.drawDetectedMarkers(image, rejectedImgPoints, borderColor=(0, 0, 255))
    # Thicker outlines on top of the detected markers so they stand out on large images.
    for corner in corners:
        cv2.polylines(image, [corner.astype("int32")], isClosed=True, color=(0, 255, 0), thickness=3)

    cv2.imwrite("output.jpg", image)
    output_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # TODO make a gif going through the thresh win sizes
    return output_image, thresh_image
def get_aruco_dict():
    # cv2.aruco PREDEFINED_DICTIONARY_NAME values, by name.
    return dict_list
aruco_dict = get_aruco_dict()
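# Example rows: image path, dictionary name, and the four adaptive-threshold slider values.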
image_paths = [
    ['examples/cans.png', 'DICT_4X4_50', 3, 23, 4, 7],
    ['examples/image4k.png', 'DICT_4X4_50', 3, 23, 4, 7],
    ['examples/pose.png', 'DICT_5X5_1000', 3, 23, 4, 7],
    ['examples/singlemarkerssource.jpg', 'DICT_6X6_250', 3, 23, 4, 7],
]
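# Corner refinement method names, in the same order as the cv2.aruco.CORNER_REFINE_* enum values.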
cornerRefinementMethods = ["CORNER_REFINE_NONE", "CORNER_REFINE_SUBPIX", "CORNER_REFINE_CONTOUR", "CORNER_REFINE_APRILTAG"]
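# Gradio UI: image upload, dictionary selector, and detector parameters on the left;
# annotated output and threshold preview on the right.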
with gr.Blocks() as demo:
    gr.Markdown("# ArUco tag detection\nSelect an ArUco dictionary, upload an image, and detect the ArUco tags.")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="filepath", label="Upload Image")
            dict_dropdown = gr.Dropdown(choices=aruco_dict, label="Select ArUco dictionary")
            advanced_params = gr.Accordion("Advanced Parameters", open=False)
            with advanced_params:
                rejects_radio = gr.Checkbox(label="Show Rejects", value=False)
                thresh_min_slider = gr.Slider(minimum=3, maximum=100, step=1, value=3, label="adaptiveThreshWinSizeMin")
                thresh_max_slider = gr.Slider(minimum=3, maximum=100, step=1, value=23, label="adaptiveThreshWinSizeMax")
                thresh_step_slider = gr.Slider(minimum=1, maximum=100, step=1, value=10, label="adaptiveThreshWinSizeStep")
                thresh_const_slider = gr.Slider(minimum=0, maximum=50, step=1, value=7, label="adaptiveThreshConstant")
                min_marker_p_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.03, label="minMarkerPerimeterRate")
                max_marker_p_slider = gr.Slider(minimum=0, maximum=10, step=0.01, value=4.0, label="maxMarkerPerimeterRate")
                poly_approx_acc_rate_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.05, label="polygonalApproxAccuracyRate")
                min_corner_distance_rate_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.05, label="minCornerDistanceRate")
                min_distance_to_border_slider = gr.Slider(minimum=0, maximum=25, step=1, value=3, label="minDistanceToBorder")
                min_marker_distance_rate_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.05, label="minMarkerDistanceRate")
                cornerRefinementMethod_radio = gr.Radio(
                    choices=cornerRefinementMethods,
                    label="cornerRefinementMethod", value=cornerRefinementMethods[0]
                )
                cornerRefinementWinSize_slider = gr.Slider(minimum=0, maximum=20, step=1, value=5, label="cornerRefinementWinSize")
                cornerRefinementMaxIterations_slider = gr.Slider(minimum=1, maximum=50, step=1, value=30, label="cornerRefinementMaxIterations")
                cornerRefinementMinAccuracy_slider = gr.Slider(minimum=0.01, maximum=2, step=0.01, value=0.1, label="cornerRefinementMinAccuracy")
                markerBorderBits_slider = gr.Slider(minimum=1, maximum=100, step=1, value=1, label="markerBorderBits")
                perspectiveRemovePixelPerCell_slider = gr.Slider(minimum=0, maximum=100, step=1, value=8, label="perspectiveRemovePixelPerCell")
                perspectiveRemoveIgnoredMarginPerCell_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.13, label="perspectiveRemoveIgnoredMarginPerCell")
                maxErroneousBitsInBorderRate_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.04, label="maxErroneousBitsInBorderRate")
                minOtsuStdDev_slider = gr.Slider(minimum=0, maximum=10, step=0.1, value=5.0, label="minOtsuStdDev")
                errorCorrectionRate_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.6, label="errorCorrectionRate")
            submit_button = gr.Button("Submit")
        with gr.Column():
            output_image = gr.Image(type="numpy", label="Output Image")
            thresh_image = gr.Image(type="numpy", label="Thresh Image")
    examples = gr.Examples(
        examples=image_paths,
        inputs=[image_input, dict_dropdown, thresh_min_slider, thresh_max_slider, thresh_step_slider, thresh_const_slider],
        outputs=[output_image, thresh_image])
    submit_button.click(
        inference,
        inputs=[image_input, dict_dropdown, rejects_radio,
                thresh_min_slider, thresh_max_slider, thresh_step_slider, thresh_const_slider,
                min_marker_p_slider, max_marker_p_slider,
                poly_approx_acc_rate_slider, min_corner_distance_rate_slider, min_distance_to_border_slider, min_marker_distance_rate_slider,
                cornerRefinementMethod_radio, cornerRefinementWinSize_slider, cornerRefinementMaxIterations_slider, cornerRefinementMinAccuracy_slider,
                markerBorderBits_slider, perspectiveRemovePixelPerCell_slider, perspectiveRemoveIgnoredMarginPerCell_slider,
                maxErroneousBitsInBorderRate_slider, minOtsuStdDev_slider, errorCorrectionRate_slider],
        outputs=[output_image, thresh_image]
    )

demo.launch()