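"""Gradio app combining ANPR (license-plate recognition) with YOLO-based helmet detection.

Loads the plate and character models from ANPR_IND and a helmet model from
Helmet-Detect-model, runs both detectors on an uploaded image, and serves a
Gradio Blocks interface on port 7860.
"""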
import os
import sys
import cv2
import gradio as gr
import numpy as np
import logging
from datetime import datetime
from pathlib import Path
# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('app.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)
# Add the project root to the Python path
project_root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(project_root)
# Import custom modules and models
from ANPR_IND.scripts.charExtraction import CharExtraction
from ANPR_IND.scripts.bboxAnnotator import BBOXAnnotator
from ultralytics import YOLO
# Initialize ANPR models and classes
wPathPlat = os.path.join(project_root, "ANPR_IND", "licence_plat.pt")
wPathChar = os.path.join(project_root, "ANPR_IND", "licence_character.pt")
classList = np.array([
    'A','B','C','D','E','F','G','H','I','J','K','L','M',
    'N','O','P','Q','R','S','T','U','V','W','X','Y','Z',
    '0','1','2','3','4','5','6','7','8','9'
])
sizePlat = (416, 200)
# Initialize the Helmet Detection model path
helmet_model_path = os.path.join(project_root, "Helmet-Detect-model", "best.pt")
# Verify that the required model files exist
required_files = [wPathPlat, wPathChar, helmet_model_path]
for file_path in required_files:
    if not os.path.exists(file_path):
        logger.error(f"Required model file not found: {file_path}")
        raise FileNotFoundError(f"Required model file not found: {file_path}")
# Initialize models
try:
    logger.info("Initializing models...")
    helmet_model = YOLO(helmet_model_path)
    extractor = CharExtraction(
        wPlatePath=wPathPlat,
        wCharacterPath=wPathChar,
        classList=classList,
        sizePlate=sizePlat,
        conf=0.5
    )
    annotator = BBOXAnnotator()
    logger.info("Models initialized successfully")
except Exception as e:
    logger.error(f"Error initializing models: {str(e)}")
    raise

def process_image(image, conf=0.45):
    """Run ANPR and helmet detection on an image and return the annotated result."""
    start_time = datetime.now()
    logger.info(f"Processing image with confidence threshold: {conf}")

    if image is None:
        logger.warning("No image provided")
        return None, "No image provided", "No image provided"
    try:
        # Convert PIL Image to OpenCV BGR format if necessary
        if isinstance(image, str):
            if not os.path.exists(image):
                raise FileNotFoundError(f"Image file not found: {image}")
            image = cv2.imread(image)
            if image is None:
                raise ValueError("Failed to read image from the provided path.")
        else:
            image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        # Run ANPR detection
        logger.info("Running ANPR detection")
        bbox, plateNum, confidence = extractor.predict(image=image, conf=conf)
        anpr_image, plateNum = annotator.draw_bbox(image.copy(), bbox, plateNum)
        plate_text = ", ".join(plateNum) if plateNum else "No plate detected"
        logger.info(f"ANPR result: {plate_text}")
        # Run helmet detection
        logger.info("Running helmet detection")
        results = helmet_model(image)
        # YOLO returns a list of Results; the first element holds the detections for this image
        helmet_detected = len(results[0].boxes) > 0
        helmet_status = "Helmet Detected" if helmet_detected else "No Helmet Detected"
        logger.info(f"Helmet detection result: {helmet_status}")
        # Retrieve the annotated image from helmet detection
        helmet_image = results[0].plot()
        # Combine annotations from both detections
        try:
            combined_image = cv2.addWeighted(anpr_image, 0.5, helmet_image, 0.5, 0)
        except Exception as e:
            logger.warning(f"Failed to combine annotations: {str(e)}")
            combined_image = helmet_image
        # Convert image from BGR to RGB for proper display in Gradio
        if isinstance(combined_image, np.ndarray):
            combined_image = cv2.cvtColor(combined_image, cv2.COLOR_BGR2RGB)
        processing_time = (datetime.now() - start_time).total_seconds()
        logger.info(f"Processing completed in {processing_time:.2f} seconds")
        return combined_image, plate_text, helmet_status
    except Exception as e:
        logger.error(f"Error processing image: {str(e)}")
        return image, f"Error: {str(e)}", "Error processing image"
# Create a list of example image paths
example_images = [
    os.path.join(project_root, "ANPR_IND", "sample_image2.jpg"),
    os.path.join(project_root, "ANPR_IND", "sample_image3.jpg"),
    os.path.join(project_root, "ANPR_IND", "sample_image5.jpg"),
    os.path.join(project_root, "ANPR_IND", "sample_image6.jpg")
]
# Verify example images exist, and remove any that aren't found
for img_path in example_images.copy():
    if not os.path.exists(img_path):
        logger.warning(f"Example image not found: {img_path}")
        example_images.remove(img_path)

def create_interface():
    with gr.Blocks(title="Traffic Violation Detection System", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# Combined ANPR and Helmet Detection System")
        gr.Markdown("Upload an image to detect license plates and check for helmet usage.")

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", type="pil")
                conf_slider = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.45,
                    label="Confidence Threshold"
                )
                detect_button = gr.Button("Detect", variant="primary")
            with gr.Column():
                output_image = gr.Image(label="Annotated Image")
                plate_output = gr.Textbox(label="License Plate")
                helmet_output = gr.Textbox(label="Helmet Status")
        # Configure example images if available
        if example_images:
            gr.Examples(
                examples=[[img, 0.45] for img in example_images],
                inputs=[input_image, conf_slider],
                outputs=[output_image, plate_output, helmet_output],
                fn=process_image,
                cache_examples=True
            )
        # Set up the click event to trigger detection
        detect_button.click(
            fn=process_image,
            inputs=[input_image, conf_slider],
            outputs=[output_image, plate_output, helmet_output]
        )
    return demo
if name == "main":
try:
logger.info("Starting application...")
demo = create_interface()
demo.queue() # Enable request queuing if your tasks are long-running
text
# Launch application on Hugging Face Spaces
# - Use server_name="0.0.0.0" to allow external connections.
# - Do not include share=True (Spaces already provides your public URL).
demo.launch(
server_name="0.0.0.0",
server_port=7860,
debug=True
)
except Exception as e:
logger.error(f"Failed to start application: {str(e)}")
sys.exit(1) |
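
# Note: outside Hugging Face Spaces the same entry point applies; the UI should then
# be reachable at http://localhost:7860. Example, assuming this file is saved as app.py
# at the project root:
#
#   pip install gradio ultralytics opencv-python numpy
#   python app.py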