|
|
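"""Algerian vehicle recognition demo (Gradio app).

Pipeline: YOLO models detect the vehicle, its colour, orientation, brand logo and
licence plate; a Keras CNN classifies the logo, TrOCR reads the plate characters,
and the plate number is decoded (wilaya, year, category) then checked against a
local SQLite access-control database.
"""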
import gradio as gr |
|
|
from ultralytics import YOLO |
|
|
import cv2 |
|
|
import numpy as np |
|
|
from PIL import Image |
|
|
import torch |
|
|
from transformers import TrOCRProcessor, VisionEncoderDecoderModel |
|
|
from datetime import datetime |
|
|
from tensorflow.keras.models import load_model |
|
|
import os |
|
|
import tempfile |
|
|
import sqlite3 |
|
|
from sqlite3 import Error |
|
|
import re |
|
|
import time |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
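# CNN logo classifier (Keras). Assumes logo_model_cnn.h5 is available in the working
# directory and that its output order matches logo_classes below.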
cnn_logo_model = load_model("logo_model_cnn.h5") |
|
|
|
|
|
logo_classes = ['Alfa romeo', 'Audi', 'BMW', 'Chevrolet', 'Citroen', 'Dacia', 'Daewoo', 'Dodge', 'Ferrari', 'Fiat', 'Ford', 'Honda', 'Hyundai', 'Jaguar', 'Jeep', 'Kia', 'Lada', 'Lancia', 'Land rover', 'Lexus', 'Maserati', 'Mazda', 'Mercedes', 'Mitsubishi', 'Nissan', 'Opel', 'Peugeot', 'Porsche', 'Renault', 'Rover', 'Saab', 'Seat', 'Skoda', 'Subaru', 'Suzuki', 'Tata', 'Tesla', 'Toyota', 'Volkswagen', 'Volvo'] |
|
|
|
|
|
|
|
|
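# YOLO detectors (Ultralytics), one locally trained weight file per task:
# colour, orientation, plate, brand logo, plate characters and whole-vehicle detection.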
model_color = YOLO("car_color.pt") |
|
|
model_orientation = YOLO("direction_best.pt") |
|
|
model_plate_detection = YOLO("plate_detection.pt") |
|
|
model_logo_detection = YOLO("car_logo_detection.pt") |
|
|
model_characters = YOLO("character_detetion.pt") |
|
|
model_vehicle = YOLO("vehicle_detection.pt") |
|
|
|
|
|
|
|
|
|
|
|
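# TrOCR (printed-text variant) from Hugging Face, used to read each cropped plate character.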
trocr_model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed") |
|
|
trocr_processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
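# Vehicle category encoded by the first digit of the middle block of an Algerian plate
# (see classify_plate below).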
CATEGORIES = { |
|
|
'1': "Véhicules de tourisme", |
|
|
'2': "Camions", |
|
|
'3': "Camionnettes", |
|
|
'4': "Autocars et autobus", |
|
|
'5': "Tracteurs routiers", |
|
|
'6': "Autres tracteurs", |
|
|
'7': "Véhicules spéciaux", |
|
|
'8': "Remorques et semi-remorques", |
|
|
'9': "Motocyclettes" |
|
|
} |
|
|
|
|
|
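# The 58 wilayas (provinces), keyed by their two-digit plate suffix.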
WILAYAS = { |
|
|
"01": "Adrar", "02": "Chlef", "03": "Laghouat", "04": "Oum El Bouaghi", |
|
|
"05": "Batna", "06": "Béjaïa", "07": "Biskra", "08": "Béchar", |
|
|
"09": "Blida", "10": "Bouira", "11": "Tamanrasset", "12": "Tébessa", |
|
|
"13": "Tlemcen", "14": "Tiaret", "15": "Tizi Ouzou", "16": "Alger", |
|
|
"17": "Djelfa", "18": "Jijel", "19": "Sétif", "20": "Saïda", |
|
|
"21": "Skikda", "22": "Sidi Bel Abbès", "23": "Annaba", "24": "Guelma", |
|
|
"25": "Constantine", "26": "Médéa", "27": "Mostaganem", "28": "MSila", |
|
|
"29": "Mascara", "30": "Ouargla", "31": "Oran", "32": "El Bayadh", |
|
|
"33": "Illizi", "34": "Bordj Bou Arreridj", "35": "Boumerdès", |
|
|
"36": "El Tarf", "37": "Tindouf", "38": "Tissemsilt", "39": "El Oued", |
|
|
"40": "Khenchela", "41": "Souk Ahras", "42": "Tipaza", "43": "Mila", |
|
|
"44": "Aïn Defla", "45": "Naâma", "46": "Aïn Témouchent", |
|
|
"47": "Ghardaïa", "48": "Relizane", |
|
|
"49": "El M'Ghair", "50": "El Menia", |
|
|
"51": "Ouled Djellal", "52": "Bordj Badji Mokhtar", |
|
|
"53": "Béni Abbès", "54": "Timimoun", |
|
|
"55": "Touggourt", "56": "Djanet", |
|
|
"57": "In Salah", "58": "In Guezzam" |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
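# Mutable state shared between the Gradio callbacks: each detection step reads the
# loaded image from here and writes its boxes/labels back, so results accumulate
# across the step-by-step buttons.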
shared_results = { |
|
|
"original_image": None, |
|
|
"img_rgb": None, |
|
|
"img_draw": None, |
|
|
"plate_crop_img": None, |
|
|
"logo_crop_img": None, |
|
|
"plate_with_chars_img": None, |
|
|
"trocr_char_list": [], |
|
|
"trocr_combined_text": "", |
|
|
"classification_result": "", |
|
|
"label_color": "", |
|
|
"label_orientation": "", |
|
|
"vehicle_type": "", |
|
|
"vehicle_model": "", |
|
|
"vehicle_brand": "", |
|
|
"logo_recognition_results": [], |
|
|
"current_frame": None, |
|
|
"video_path": None, |
|
|
"video_processing": False, |
|
|
"frame_count": 0, |
|
|
"total_frames": 0, |
|
|
"original_video_dimensions": None, |
|
|
"corrected_orientation": False, |
|
|
"vehicle_box": None, |
|
|
"vehicle_detected": False, |
|
|
"detection_boxes": { |
|
|
"plate": None, |
|
|
"logo": None, |
|
|
"color": None, |
|
|
"orientation": None |
|
|
}, |
|
|
"classified_plate": None |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
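# Plate decoding as implemented here: the OCR text is reduced to its digits and split
# as [0:3] série | [3:9] middle block (1st digit = catégorie, next two = année 20YY) |
# [9:] code wilaya (one or two digits, so 10- and 11-digit readings are accepted).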
def classify_plate(text): |
|
|
"""Classification des plaques algériennes (10-11 chiffres)""" |
|
|
try: |
|
|
if not text: |
|
|
return None |
|
|
|
|
|
clean_text = ''.join(c for c in text if c.isdigit()) |
|
|
|
|
|
if len(clean_text) not in {10, 11}: |
|
|
return None |
|
|
|
|
|
serie = clean_text[:3] |
|
|
middle = clean_text[3:9] |
|
|
wilaya_code = clean_text[9:] |
|
|
|
|
|
        wilaya_name = WILAYAS.get(wilaya_code, "Wilaya inconnue")  # WILAYAS values are plain strings
|
|
categorie_code = middle[0] |
|
|
annee = middle[1:3] |
|
|
|
|
|
return { |
|
|
'matricule_complet': clean_text, |
|
|
'serie': serie, |
|
|
            'wilaya': (wilaya_code, wilaya_name),
|
|
'annee': f"20{annee}", |
|
|
            'categorie': (categorie_code, CATEGORIES.get(categorie_code, "Inconnue")),
|
|
'is_algerian': True, |
|
|
'length': len(clean_text) |
|
|
} |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Erreur classification plaque: {str(e)}") |
|
|
return None |
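# Illustrative trace (hypothetical reading): classify_plate("12312345616") returns
# serie='123', categorie=('1', 'Véhicules de tourisme'), annee='2023',
# wilaya=('16', 'Alger').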
|
|
|
|
|
def predict_brand(image): |
|
|
"""Prédire la marque de voiture à partir de l'image""" |
|
|
try: |
|
|
img = Image.fromarray(image).resize((224, 224)) |
|
|
img_array = np.array(img) / 255.0 |
|
|
img_array = np.expand_dims(img_array, axis=0) |
|
|
|
|
|
predictions = cnn_logo_model.predict(img_array) |
|
|
predicted_class = np.argmax(predictions[0]) |
|
|
confidence = predictions[0][predicted_class] |
|
|
|
|
|
if confidence < 0.5: |
|
|
return "Marque non détectée (confiance trop faible)" |
|
|
|
|
|
brand = logo_classes[predicted_class] |
|
|
return f"{brand} (confiance: {confidence:.2f})" |
|
|
except Exception as e: |
|
|
print(f"Erreur lors de la prédiction de la marque: {str(e)}") |
|
|
return "Erreur de détection" |
|
|
|
|
|
def recognize_logo(cropped_logo): |
|
|
"""Reconnaître la marque à partir d'un logo détecté""" |
|
|
try: |
|
|
        if cropped_logo is None or min(cropped_logo.size) == 0:
|
|
return "Logo trop petit pour analyse" |
|
|
|
|
|
        # The crop comes from an RGB array (same convention as predict_brand), so no colour conversion is needed
        resized_logo = cv2.resize(np.array(cropped_logo), (128, 128))
        normalized_logo = resized_logo / 255.0
|
|
input_logo = np.expand_dims(normalized_logo, axis=0) |
|
|
|
|
|
predictions = cnn_logo_model.predict(input_logo, verbose=0) |
|
|
pred_index = np.argmax(predictions[0]) |
|
|
pred_label = logo_classes[pred_index] |
|
|
pred_conf = predictions[0][pred_index] |
|
|
|
|
|
if pred_conf < 0.5: |
|
|
return f"Marque incertaine: {pred_label} ({pred_conf:.2f})" |
|
|
|
|
|
return f"{pred_label} (confiance: {pred_conf:.2f})" |
|
|
except Exception as e: |
|
|
print(f"Erreur reconnaissance logo: {str(e)}") |
|
|
return "Erreur d'analyse" |
|
|
|
|
|
def draw_detection_boxes(image): |
|
|
"""Dessiner toutes les boîtes de détection sur l'image""" |
|
|
img_draw = image.copy() |
|
|
|
|
|
if shared_results["vehicle_box"]: |
|
|
x1, y1, x2, y2 = shared_results["vehicle_box"] |
|
|
cv2.rectangle(img_draw, (x1, y1), (x2, y2), (0, 165, 255), 2) |
|
|
cv2.putText(img_draw, "VEHICLE", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 165, 255), 2) |
|
|
|
|
|
if shared_results["detection_boxes"]["plate"]: |
|
|
x1, y1, x2, y2 = shared_results["detection_boxes"]["plate"] |
|
|
cv2.rectangle(img_draw, (x1, y1), (x2, y2), (0, 255, 0), 2) |
|
|
cv2.putText(img_draw, "PLATE", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2) |
|
|
|
|
|
if shared_results["detection_boxes"]["logo"]: |
|
|
x1, y1, x2, y2 = shared_results["detection_boxes"]["logo"] |
|
|
cv2.rectangle(img_draw, (x1, y1), (x2, y2), (255, 0, 0), 2) |
|
|
cv2.putText(img_draw, "LOGO", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2) |
|
|
|
|
|
if shared_results["vehicle_model"]: |
|
|
model_text = shared_results["vehicle_model"].split("(")[0].strip() if "(" in shared_results["vehicle_model"] else shared_results["vehicle_model"] |
|
|
cv2.putText(img_draw, f"Model: {model_text}", (x1, y2 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2) |
|
|
|
|
|
if shared_results["detection_boxes"]["color"]: |
|
|
x1, y1, x2, y2 = shared_results["detection_boxes"]["color"] |
|
|
cv2.rectangle(img_draw, (x1, y1), (x2, y2), (0, 0, 255), 2) |
|
|
cv2.putText(img_draw, "COLOR", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2) |
|
|
|
|
|
if shared_results["label_color"]: |
|
|
cv2.putText(img_draw, f"{shared_results['label_color']}", (x1, y2 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) |
|
|
|
|
|
if shared_results["detection_boxes"]["orientation"]: |
|
|
x1, y1, x2, y2 = shared_results["detection_boxes"]["orientation"] |
|
|
cv2.rectangle(img_draw, (x1, y1), (x2, y2), (255, 255, 0), 2) |
|
|
cv2.putText(img_draw, "ORIENTATION", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 0), 2) |
|
|
|
|
|
if shared_results["label_orientation"]: |
|
|
cv2.putText(img_draw, f"{shared_results['label_orientation']}", (x1, y2 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2) |
|
|
|
|
|
return img_draw |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
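# Video helper: keep one frame out of every `frame_skip` (default 15) to bound the
# per-video processing time.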
def extract_frames(video_path, frame_skip=15): |
|
|
"""Extraire les frames d'une vidéo avec un intervalle donné""" |
|
|
cap = cv2.VideoCapture(video_path) |
|
|
if not cap.isOpened(): |
|
|
raise gr.Error("Échec de lecture de la vidéo") |
|
|
|
|
|
frames = [] |
|
|
count = 0 |
|
|
|
|
|
while True: |
|
|
ret, frame = cap.read() |
|
|
if not ret: |
|
|
break |
|
|
|
|
|
if count % frame_skip == 0: |
|
|
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) |
|
|
frames.append(frame_rgb) |
|
|
|
|
|
count += 1 |
|
|
|
|
|
cap.release() |
|
|
return frames |
|
|
|
|
|
def process_video_frame(frame): |
|
|
"""Traiter un seul frame de vidéo""" |
|
|
|
|
|
    shared_results.update({
        "original_image": frame,
        "video_processing": True,
|
|
"img_rgb": frame.copy(), |
|
|
"img_draw": frame.copy(), |
|
|
"current_frame": frame, |
|
|
"corrected_orientation": False, |
|
|
"label_color": "", |
|
|
"label_orientation": "", |
|
|
"vehicle_type": "", |
|
|
"vehicle_model": "", |
|
|
"vehicle_brand": "", |
|
|
"logo_recognition_results": [], |
|
|
"trocr_char_list": [], |
|
|
"trocr_combined_text": "", |
|
|
"classification_result": "", |
|
|
"vehicle_box": None, |
|
|
"vehicle_detected": False, |
|
|
"detection_boxes": { |
|
|
"plate": None, |
|
|
"logo": None, |
|
|
"color": None, |
|
|
"orientation": None |
|
|
}, |
|
|
"plate_crop_img": None, |
|
|
"logo_crop_img": None, |
|
|
"plate_with_chars_img": None, |
|
|
"classified_plate": None |
|
|
}) |
|
|
|
|
|
|
|
|
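    # Run the full pipeline on this frame; each step stores its result in shared_results
    # (vehicle first, since the later detectors crop to the vehicle box when available).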
detect_vehicle() |
|
|
detect_color() |
|
|
detect_orientation() |
|
|
detect_logo_and_model() |
|
|
detect_plate() |
|
|
classify_plate_number() |
|
|
|
|
|
|
|
|
return { |
|
|
"processed_image": Image.fromarray(shared_results["img_draw"]), |
|
|
"color": shared_results["label_color"], |
|
|
"orientation": shared_results["label_orientation"], |
|
|
"brand": shared_results["vehicle_brand"], |
|
|
"model": shared_results["vehicle_model"], |
|
|
"plate_text": shared_results["trocr_combined_text"], |
|
|
"plate_info": shared_results["classified_plate"], |
|
|
"vehicle_type": shared_results["vehicle_type"] |
|
|
} |
|
|
|
|
|
def process_video(video_path): |
|
|
"""Traiter une vidéo complète et retourner les meilleurs résultats""" |
|
|
frames = extract_frames(video_path) |
|
|
if not frames: |
|
|
raise gr.Error("Aucun frame valide trouvé dans la vidéo") |
|
|
|
|
|
|
|
|
all_results = [] |
|
|
processed_frames = [] |
|
|
|
|
|
for frame in frames: |
|
|
frame_results = process_video_frame(frame) |
|
|
all_results.append(frame_results) |
|
|
processed_frames.append(frame_results["processed_image"]) |
|
|
|
|
|
|
|
|
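    # Pick the most informative frame with a simple score: a valid Algerian plate
    # weighs 2, a recognised brand 1, a detected colour 1.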
best_frame = None |
|
|
best_score = -1 |
|
|
|
|
|
for result in all_results: |
|
|
score = 0 |
|
|
if result["plate_info"] and result["plate_info"]["is_algerian"]: |
|
|
score += 2 |
|
|
if result["brand"] and "non détectée" not in result["brand"]: |
|
|
score += 1 |
|
|
if result["color"]: |
|
|
score += 1 |
|
|
|
|
|
if score > best_score: |
|
|
best_score = score |
|
|
best_frame = result |
|
|
|
|
|
if not best_frame: |
|
|
best_frame = all_results[0] |
|
|
|
|
|
return processed_frames, best_frame |
|
|
|
|
def load_image(image_path): |
|
|
"""Charger et préparer l'image de base""" |
|
|
if isinstance(image_path, str): |
|
|
img = cv2.imread(image_path) |
|
|
else: |
|
|
img = cv2.cvtColor(image_path, cv2.COLOR_RGB2BGR) |
|
|
|
|
|
if img is None: |
|
|
raise gr.Error("Échec de lecture de l'image") |
|
|
|
|
|
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) |
|
|
img_draw = img_rgb.copy() |
|
|
|
|
|
shared_results.update({ |
|
|
"original_image": img, |
|
|
"img_rgb": img_rgb, |
|
|
"img_draw": img_draw, |
|
|
"video_processing": False, |
|
|
"corrected_orientation": False, |
|
|
"detection_boxes": { |
|
|
"plate": None, |
|
|
"logo": None, |
|
|
"color": None, |
|
|
"orientation": None |
|
|
} |
|
|
}) |
|
|
|
|
|
return Image.fromarray(img_rgb) |
|
|
|
|
|
def detect_vehicle(): |
|
|
"""Détecter le véhicule principal dans l'image""" |
|
|
if shared_results["img_rgb"] is None: |
|
|
return "Veuillez d'abord charger une image/vidéo", None |
|
|
|
|
|
img_to_process = shared_results["img_rgb"] |
|
|
if shared_results.get("corrected_orientation", False): |
|
|
height, width = img_to_process.shape[:2] |
|
|
if height > width: |
|
|
img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE) |
|
|
|
|
|
results_vehicle = model_vehicle(img_to_process) |
|
|
img_with_boxes = shared_results["img_rgb"].copy() |
|
|
vehicle_detected = False |
|
|
|
|
|
for r in results_vehicle: |
|
|
if hasattr(r, 'boxes') and r.boxes and hasattr(r.boxes, 'cls') and len(r.boxes.cls) > 0: |
|
|
largest_box = None |
|
|
max_area = 0 |
|
|
for box in r.boxes: |
|
|
x1, y1, x2, y2 = map(int, box.xyxy[0]) |
|
|
area = (x2 - x1) * (y2 - y1) |
|
|
if area > max_area: |
|
|
max_area = area |
|
|
largest_box = (x1, y1, x2, y2) |
|
|
|
|
|
if largest_box: |
|
|
x1, y1, x2, y2 = largest_box |
|
|
shared_results["vehicle_box"] = largest_box |
|
|
shared_results["vehicle_detected"] = True |
|
|
vehicle_detected = True |
|
|
|
|
|
cv2.rectangle(img_with_boxes, (x1, y1), (x2, y2), (0, 165, 255), 2) |
|
|
cv2.putText(img_with_boxes, "VEHICLE", (x1, y1 - 10), |
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 165, 255), 2) |
|
|
|
|
|
shared_results["img_draw"] = img_with_boxes |
|
|
|
|
|
if vehicle_detected: |
|
|
return "Véhicule détecté - Vous pouvez maintenant détecter la couleur", Image.fromarray(img_with_boxes) |
|
|
else: |
|
|
shared_results["vehicle_box"] = None |
|
|
shared_results["vehicle_detected"] = False |
|
|
return "Aucun véhicule détecté - La détection de couleur sera moins précise", Image.fromarray(img_with_boxes) |
|
|
|
|
|
def detect_color(): |
|
|
"""Détecter la couleur du véhicule""" |
|
|
if shared_results["img_rgb"] is None: |
|
|
return "Veuillez d'abord charger une image/vidéo", None |
|
|
|
|
|
img_to_process = shared_results["img_rgb"] |
|
|
if shared_results.get("corrected_orientation", False): |
|
|
height, width = img_to_process.shape[:2] |
|
|
if height > width: |
|
|
img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE) |
|
|
|
|
|
if shared_results["vehicle_detected"] and shared_results["vehicle_box"]: |
|
|
x1, y1, x2, y2 = shared_results["vehicle_box"] |
|
|
vehicle_roi = img_to_process[y1:y2, x1:x2] |
|
|
results_color = model_color(vehicle_roi) |
|
|
else: |
|
|
results_color = model_color(img_to_process) |
|
|
|
|
|
img_with_boxes = shared_results["img_draw"].copy() if shared_results["img_draw"] is not None else img_to_process.copy() |
|
|
color_detected = False |
|
|
|
|
|
for r in results_color: |
|
|
if hasattr(r, 'boxes') and r.boxes and hasattr(r.boxes, 'cls') and len(r.boxes.cls) > 0: |
|
|
cls = int(r.boxes.cls[0]) |
|
|
shared_results["label_color"] = r.names[cls] |
|
|
|
|
|
if shared_results["vehicle_detected"] and shared_results["vehicle_box"]: |
|
|
vx1, vy1, vx2, vy2 = shared_results["vehicle_box"] |
|
|
box = r.boxes.xyxy[0].cpu().numpy() |
|
|
x1, y1, x2, y2 = map(int, box) |
|
|
abs_x1 = vx1 + x1 |
|
|
abs_y1 = vy1 + y1 |
|
|
abs_x2 = vx1 + x2 |
|
|
abs_y2 = vy1 + y2 |
|
|
shared_results["detection_boxes"]["color"] = (abs_x1, abs_y1, abs_x2, abs_y2) |
|
|
else: |
|
|
box = r.boxes.xyxy[0].cpu().numpy() |
|
|
x1, y1, x2, y2 = map(int, box) |
|
|
shared_results["detection_boxes"]["color"] = (x1, y1, x2, y2) |
|
|
|
|
|
color_detected = True |
|
|
|
|
|
img_with_boxes = draw_detection_boxes(img_with_boxes) |
|
|
shared_results["img_draw"] = img_with_boxes |
|
|
|
|
|
if color_detected: |
|
|
return f"Couleur: {shared_results['label_color']}", Image.fromarray(img_with_boxes) |
|
|
else: |
|
|
return "Couleur non détectée", Image.fromarray(img_with_boxes) |
|
|
|
|
|
def detect_orientation(): |
|
|
"""Détecter l'orientation du véhicule""" |
|
|
if shared_results["img_rgb"] is None: |
|
|
return "Veuillez d'abord charger une image/vidéo", None |
|
|
|
|
|
img_to_process = shared_results["img_rgb"] |
|
|
if shared_results["video_processing"]: |
|
|
height, width = img_to_process.shape[:2] |
|
|
if height > width: |
|
|
img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE) |
|
|
shared_results["corrected_orientation"] = True |
|
|
|
|
|
results_orientation = model_orientation(img_to_process) |
|
|
for r in results_orientation: |
|
|
if hasattr(r, 'boxes') and r.boxes and hasattr(r.boxes, 'cls') and len(r.boxes.cls) > 0: |
|
|
cls = int(r.boxes.cls[0]) |
|
|
shared_results["label_orientation"] = r.names[cls] |
|
|
|
|
|
box = r.boxes.xyxy[0].cpu().numpy() |
|
|
x1, y1, x2, y2 = map(int, box) |
|
|
shared_results["detection_boxes"]["orientation"] = (x1, y1, x2, y2) |
|
|
|
|
|
img_with_boxes = draw_detection_boxes(shared_results["img_rgb"]) |
|
|
shared_results["img_draw"] = img_with_boxes |
|
|
|
|
|
return ( |
|
|
f"Orientation: {shared_results['label_orientation']}" if shared_results['label_orientation'] else "Orientation non détectée", |
|
|
Image.fromarray(img_with_boxes) |
|
|
) |
|
|
def detect_logo_and_model(): |
|
|
"""Détecter et reconnaître le logo et le modèle du véhicule""" |
|
|
if shared_results["img_rgb"] is None: |
|
|
return "Veuillez d'abord charger une image/vidéo", None, None, None, None |
|
|
|
|
|
shared_results["logo_recognition_results"] = [] |
|
|
img_to_process = shared_results["img_rgb"] |
|
|
detected_model = "Modèle non détecté" |
|
|
|
|
|
if shared_results.get("corrected_orientation", False): |
|
|
height, width = img_to_process.shape[:2] |
|
|
if height > width: |
|
|
img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE) |
|
|
|
|
|
if shared_results["vehicle_detected"] and shared_results["vehicle_box"]: |
|
|
vx1, vy1, vx2, vy2 = shared_results["vehicle_box"] |
|
|
roi = img_to_process[vy1:vy2, vx1:vx2] |
|
|
results_logo = model_logo_detection(roi) |
|
|
else: |
|
|
results_logo = model_logo_detection(img_to_process) |
|
|
|
|
|
if results_logo and results_logo[0].boxes: |
|
|
for box in results_logo[0].boxes: |
|
|
if shared_results["vehicle_detected"] and shared_results["vehicle_box"]: |
|
|
rx1, ry1, rx2, ry2 = map(int, box.xyxy[0]) |
|
|
abs_x1 = vx1 + rx1 |
|
|
abs_y1 = vy1 + ry1 |
|
|
abs_x2 = vx1 + rx2 |
|
|
abs_y2 = vy1 + ry2 |
|
|
else: |
|
|
abs_x1, abs_y1, abs_x2, abs_y2 = map(int, box.xyxy[0]) |
|
|
|
|
|
shared_results["detection_boxes"]["logo"] = (abs_x1, abs_y1, abs_x2, abs_y2) |
|
|
logo_crop = img_to_process[abs_y1:abs_y2, abs_x1:abs_x2] |
|
|
shared_results["logo_crop_img"] = Image.fromarray(logo_crop) |
|
|
|
|
|
logo_recognition = recognize_logo(shared_results["logo_crop_img"]) |
|
|
shared_results["logo_recognition_results"].append(logo_recognition) |
|
|
|
|
|
if not shared_results["vehicle_brand"] or "confiance" not in shared_results["vehicle_brand"]: |
|
|
shared_results["vehicle_brand"] = logo_recognition |
|
|
|
|
|
if logo_recognition and shared_results["logo_crop_img"]: |
|
|
detected_model = predict_car_model(logo_recognition, shared_results["logo_crop_img"]) |
|
|
shared_results["vehicle_model"] = detected_model |
|
|
|
|
|
img_with_boxes = draw_detection_boxes(shared_results["img_rgb"]) |
|
|
shared_results["img_draw"] = img_with_boxes |
|
|
|
|
|
if not shared_results["vehicle_brand"] or "incertaine" in shared_results["vehicle_brand"] or "Erreur" in shared_results["vehicle_brand"]: |
|
|
global_brand = predict_brand(img_to_process) |
|
|
if global_brand and "non détectée" not in global_brand: |
|
|
shared_results["vehicle_brand"] = global_brand |
|
|
|
|
|
logo_results_text = " | ".join(shared_results["logo_recognition_results"]) if shared_results["logo_recognition_results"] else "Aucun logo reconnu" |
|
|
|
|
|
return ( |
|
|
f"Marque: {shared_results['vehicle_brand']}" if shared_results['vehicle_brand'] else "Marque non détectée", |
|
|
f"Modèle: {shared_results['vehicle_model']}" if shared_results['vehicle_model'] else "Modèle non détecté", |
|
|
f"Reconnaissance logo: {logo_results_text}", |
|
|
Image.fromarray(img_with_boxes), |
|
|
shared_results["logo_crop_img"] |
|
|
) |
|
|
|
|
|
def detect_plate(): |
|
|
"""Détecter la plaque d'immatriculation et reconnaître les caractères""" |
|
|
if shared_results["img_rgb"] is None: |
|
|
return "Veuillez d'abord charger une image/vidéo", None, None, None |
|
|
|
|
|
shared_results["trocr_char_list"] = [] |
|
|
shared_results["trocr_combined_text"] = "" |
|
|
img_to_process = shared_results["img_rgb"] |
|
|
|
|
|
if shared_results.get("corrected_orientation", False): |
|
|
height, width = img_to_process.shape[:2] |
|
|
if height > width: |
|
|
img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE) |
|
|
|
|
|
if shared_results["vehicle_detected"] and shared_results["vehicle_box"]: |
|
|
vx1, vy1, vx2, vy2 = shared_results["vehicle_box"] |
|
|
roi = img_to_process[vy1:vy2, vx1:vx2] |
|
|
results_plate = model_plate_detection(roi) |
|
|
else: |
|
|
results_plate = model_plate_detection(img_to_process) |
|
|
|
|
|
if results_plate and results_plate[0].boxes: |
|
|
for box in results_plate[0].boxes: |
|
|
if shared_results["vehicle_detected"] and shared_results["vehicle_box"]: |
|
|
rx1, ry1, rx2, ry2 = map(int, box.xyxy[0]) |
|
|
abs_x1 = vx1 + rx1 |
|
|
abs_y1 = vy1 + ry1 |
|
|
abs_x2 = vx1 + rx2 |
|
|
abs_y2 = vy1 + ry2 |
|
|
else: |
|
|
abs_x1, abs_y1, abs_x2, abs_y2 = map(int, box.xyxy[0]) |
|
|
|
|
|
shared_results["detection_boxes"]["plate"] = (abs_x1, abs_y1, abs_x2, abs_y2) |
|
|
plate_crop = img_to_process[abs_y1:abs_y2, abs_x1:abs_x2] |
|
|
shared_results["plate_crop_img"] = Image.fromarray(plate_crop) |
|
|
plate_for_char_draw = plate_crop.copy() |
|
|
|
|
|
results_chars = model_characters(plate_crop) |
|
|
char_boxes = [] |
|
|
for r in results_chars: |
|
|
if r.boxes: |
|
|
for box in r.boxes: |
|
|
x1c, y1c, x2c, y2c = map(int, box.xyxy[0]) |
|
|
char_boxes.append(((x1c, y1c, x2c, y2c), x1c)) |
|
|
|
|
|
char_boxes.sort(key=lambda x: x[1]) |
|
|
|
|
|
for i, (coords, _) in enumerate(char_boxes): |
|
|
x1c, y1c, x2c, y2c = coords |
|
|
char_crop = plate_crop[y1c:y2c, x1c:x2c] |
|
|
char_pil = Image.fromarray(char_crop).convert("RGB") |
|
|
|
|
|
try: |
|
|
inputs = trocr_processor(images=char_pil, return_tensors="pt").pixel_values |
|
|
generated_ids = trocr_model.generate(inputs) |
|
|
predicted_char = trocr_processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() |
|
|
shared_results["trocr_char_list"].append(predicted_char) |
|
|
                except Exception:
                    predicted_char = "?"
                    shared_results["trocr_char_list"].append(predicted_char)
|
|
|
|
|
cv2.rectangle(plate_for_char_draw, (x1c, y1c), (x2c, y2c), (255, 0, 255), 1) |
|
|
cv2.putText(plate_for_char_draw, predicted_char, (x1c, y1c - 5), |
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 1) |
|
|
|
|
|
shared_results["plate_with_chars_img"] = Image.fromarray(plate_for_char_draw) |
|
|
shared_results["trocr_combined_text"] = ''.join(shared_results["trocr_char_list"]) |
|
|
break |
|
|
|
|
|
img_with_boxes = draw_detection_boxes(shared_results["img_rgb"]) |
|
|
shared_results["img_draw"] = img_with_boxes |
|
|
|
|
|
return ( |
|
|
Image.fromarray(img_with_boxes), |
|
|
shared_results["plate_crop_img"], |
|
|
shared_results["plate_with_chars_img"], |
|
|
shared_results["trocr_char_list"] |
|
|
) |
|
|
|
|
|
def classify_plate_number(): |
|
|
"""Classifier le numéro de plaque détecté""" |
|
|
if not shared_results.get("trocr_combined_text"): |
|
|
return ( |
|
|
"Aucun texte de plaque détecté", |
|
|
"Type: Non déterminé", |
|
|
"❌ Aucune plaque valide", |
|
|
"Action: Vérifier la qualité de l'image" |
|
|
) |
|
|
|
|
|
plate_text = shared_results["trocr_combined_text"] |
|
|
digits = ''.join(c for c in plate_text if c.isdigit()) |
|
|
|
|
|
if len(digits) not in {10, 11}: |
|
|
return ( |
|
|
f"Texte rejeté: {plate_text} ({len(digits)} chiffres)", |
|
|
"Type: Non déterminé", |
|
|
f"❌ NON ALGÉRIEN (format {len(digits)} chiffres)", |
|
|
"Action: Contrôle requis" |
|
|
) |
|
|
|
|
|
if any(c.isalpha() for c in plate_text): |
|
|
return ( |
|
|
f"Texte rejeté: {plate_text} (contient des lettres)", |
|
|
"Type: Non déterminé", |
|
|
"❌ NON ALGÉRIEN (lettres détectées)", |
|
|
"Action: Contrôle immédiat" |
|
|
) |
|
|
|
|
|
plate_info = classify_plate(plate_text) |
|
|
|
|
|
if not plate_info: |
|
|
return ( |
|
|
f"Texte non classifiable: {plate_text}", |
|
|
"Type: Non déterminé", |
|
|
"❌ Format invalide", |
|
|
"Action: Vérifier manuellement" |
|
|
) |
|
|
|
|
|
result = [ |
|
|
f"Plaque: {plate_info['matricule_complet']}", |
|
|
f"Longueur: {plate_info['length']} chiffres", |
|
|
f"Wilaya: {plate_info['wilaya'][1]} ({plate_info['wilaya'][0]})", |
|
|
f"Année: {plate_info['annee']}", |
|
|
f"Catégorie: {plate_info['categorie'][1]}", |
|
|
f"Série: {plate_info['serie']}" |
|
|
] |
|
|
|
|
|
shared_results["classified_plate"] = plate_info |
|
|
shared_results["vehicle_type"] = plate_info['categorie'][1] |
|
|
|
|
|
|
|
|
db_check = check_vehicle(plate_info['matricule_complet']) |
|
|
db_status = "\nStatut DB: " + db_check[1] if db_check[0] else "" |
|
|
|
|
|
return ( |
|
|
'\n'.join(result) + db_status, |
|
|
f"Type: {plate_info['categorie'][1]}", |
|
|
"✅ PLAQUE ALGÉRIENNE VALIDE", |
|
|
"Action: Vérification standard" |
|
|
) |
|
|
|
|
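# Local SQLite database of registered vehicles and their access rules.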
DB_PATH = "vehicules_database.db" |
|
|
TIME_PATTERN = re.compile(r'^\d{2}:\d{2}-\d{2}:\d{2}$') |
|
|
|
|
|
def create_connection(): |
|
|
"""Créer une connexion à la base SQLite""" |
|
|
conn = None |
|
|
try: |
|
|
conn = sqlite3.connect(DB_PATH) |
|
|
return conn |
|
|
except Error as e: |
|
|
print(f"Erreur de connexion à SQLite: {e}") |
|
|
return conn |
|
|
|
|
|
def init_database(): |
|
|
"""Initialiser la base de données si elle n'existe pas""" |
|
|
conn = create_connection() |
|
|
if conn is not None: |
|
|
try: |
|
|
cursor = conn.cursor() |
|
|
cursor.execute(""" |
|
|
CREATE TABLE IF NOT EXISTS vehicules ( |
|
|
id INTEGER PRIMARY KEY AUTOINCREMENT, |
|
|
plaque TEXT NOT NULL UNIQUE, |
|
|
marque TEXT, |
|
|
modele TEXT, |
|
|
couleur TEXT, |
|
|
statut TEXT, |
|
|
plage_horaire TEXT, |
|
|
date_enregistrement TEXT |
|
|
) |
|
|
""") |
|
|
conn.commit() |
|
|
except Error as e: |
|
|
print(f"Erreur création table: {e}") |
|
|
finally: |
|
|
conn.close() |
|
|
|
|
|
def check_vehicle(plate_text): |
|
|
"""Vérifier si un véhicule existe dans la base""" |
|
|
conn = create_connection() |
|
|
if conn is not None: |
|
|
try: |
|
|
cursor = conn.cursor() |
|
|
cursor.execute("SELECT statut, plage_horaire FROM vehicules WHERE plaque = ?", (plate_text,)) |
|
|
result = cursor.fetchone() |
|
|
|
|
|
if result: |
|
|
return True, f"Statut: {result[0]} | Accès: {result[1]}" |
|
|
return False, "Véhicule non enregistré" |
|
|
except Error as e: |
|
|
print(f"Erreur lecture base: {e}") |
|
|
return False, "Erreur base de données" |
|
|
finally: |
|
|
conn.close() |
|
|
return False, "Erreur de connexion" |
|
|
|
|
|
def save_vehicle(plate_info, color, model, brand, status, time_range): |
|
|
"""Enregistrer un nouveau véhicule dans la base""" |
|
|
conn = create_connection() |
|
|
if conn is not None: |
|
|
try: |
|
|
plate_number = str(plate_info['matricule_complet']).strip() |
|
|
clean_brand = brand.split('(')[0].strip() if '(' in brand else brand |
|
|
clean_model = model.split('(')[0].strip() if '(' in model else model |
|
|
|
|
|
cursor = conn.cursor() |
|
|
cursor.execute("SELECT 1 FROM vehicules WHERE plaque = ?", (plate_number,)) |
|
|
if cursor.fetchone(): |
|
|
return False, "Véhicule déjà existant" |
|
|
|
|
|
cursor.execute(""" |
|
|
INSERT INTO vehicules (plaque, marque, modele, couleur, statut, plage_horaire, date_enregistrement) |
|
|
VALUES (?, ?, ?, ?, ?, ?, ?) |
|
|
""", ( |
|
|
plate_number, |
|
|
clean_brand, |
|
|
clean_model, |
|
|
color, |
|
|
status, |
|
|
time_range, |
|
|
datetime.now().strftime("%Y-%m-%d %H:%M:%S") |
|
|
)) |
|
|
|
|
|
conn.commit() |
|
|
return True, "Enregistrement réussi" |
|
|
except Error as e: |
|
|
return False, f"Erreur enregistrement: {e}" |
|
|
finally: |
|
|
conn.close() |
|
|
return False, "Erreur de connexion" |
|
|
|
|
|
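# Access rule: "Non Autorisé" always refuses, "24/24" always allows, otherwise the
# stored "HH:MM-HH:MM" window is compared with the current time.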
def is_access_allowed(plate_text): |
|
|
"""Vérifier si l'accès est autorisé selon la plage horaire""" |
|
|
conn = create_connection() |
|
|
if conn is not None: |
|
|
try: |
|
|
cursor = conn.cursor() |
|
|
cursor.execute("SELECT statut, plage_horaire FROM vehicules WHERE plaque = ?", (plate_text,)) |
|
|
vehicle = cursor.fetchone() |
|
|
|
|
|
if not vehicle: |
|
|
return False |
|
|
|
|
|
if vehicle[0] == "Non Autorisé": |
|
|
return False |
|
|
|
|
|
if vehicle[1] == "24/24": |
|
|
return True |
|
|
|
|
|
current_time = datetime.now().time() |
|
|
            start_str, end_str = vehicle[1].split('-')
            # Build time objects via strptime: the bare name `time` refers to the stdlib module here
            start = datetime.strptime(start_str.strip(), "%H:%M").time()
            end = datetime.strptime(end_str.strip(), "%H:%M").time()
|
|
|
|
|
return start <= current_time <= end |
|
|
except Error as e: |
|
|
print(f"Erreur vérification accès: {e}") |
|
|
return False |
|
|
finally: |
|
|
conn.close() |
|
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
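# Gradio UI: left column = file loading, step-by-step analysis buttons and access
# management; right column = visualisation and detailed result tabs.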
with gr.Blocks(title="🚗 Système de Reconnaissance de Véhicules Algériens", theme=gr.themes.Soft()) as demo: |
|
|
gr.Markdown(""" |
|
|
# 🚗 Système de Reconnaissance de Véhicules Algériens |
|
|
*Détection de plaque d'immatriculation, logo, couleur et autres caractéristiques du véhicule* |
|
|
""") |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
with gr.Column(scale=1): |
|
|
|
|
|
gr.Markdown("### 📁 Chargement des données") |
|
|
input_type = gr.Radio(["Image", "Vidéo"], label="Type d'entrée", value="Image", interactive=True) |
|
|
file_input = gr.File(label="Télécharger un fichier", file_types=["image", "video"]) |
|
|
load_btn = gr.Button("Charger le fichier", variant="primary") |
|
|
|
|
|
|
|
|
process_video_btn = gr.Button("Traiter toute la vidéo", variant="primary", visible=False) |
|
|
|
|
|
|
|
|
gr.Markdown("### 🔍 Analyse Frame par Frame") |
|
|
with gr.Row(): |
|
|
detect_vehicle_btn = gr.Button("Détection de véhicule", variant="secondary") |
|
|
detect_color_btn = gr.Button("Détection de couleur", variant="secondary") |
|
|
|
|
|
with gr.Row(): |
|
|
detect_orientation_btn = gr.Button("Détection de l'orientation", variant="secondary") |
|
|
detect_logo_btn = gr.Button("Détecter logo", variant="secondary") |
|
|
|
|
|
with gr.Row(): |
|
|
detect_plate_btn = gr.Button("Détection de plaque", variant="secondary") |
|
|
classify_plate_btn = gr.Button("Classifier plaque", variant="primary") |
|
|
|
|
|
next_frame_btn = gr.Button("Frame suivant", visible=False) |
|
|
|
|
|
|
|
|
gr.Markdown("### 🔐 Gestion d'Accès") |
|
|
check_btn = gr.Button("Vérifier véhicule", variant="secondary") |
|
|
with gr.Row(visible=False) as access_form: |
|
|
access_status = gr.Radio( |
|
|
["Autorisé", "Non Autorisé"], |
|
|
label="Statut d'accès" |
|
|
) |
|
|
time_range = gr.Dropdown( |
|
|
["24/24", "8:00-16:00", "9:00-17:00", "Personnalisé..."], |
|
|
label="Plage horaire" |
|
|
) |
|
|
custom_time = gr.Textbox( |
|
|
visible=False, |
|
|
placeholder="HH:MM-HH:MM", |
|
|
label="Plage horaire personnalisée" |
|
|
) |
|
|
save_btn = gr.Button("Enregistrer véhicule", variant="primary") |
|
|
access_output = gr.Textbox(label="Résultat vérification") |
|
|
|
|
|
|
|
|
with gr.Column(scale=2): |
|
|
|
|
|
with gr.Tab("📸 Visualisation"): |
|
|
original_image = gr.Image(label="Image originale", interactive=False) |
|
|
processed_image = gr.Image(label="Image annotée", interactive=False) |
|
|
status_output = gr.Textbox(label="Statut", interactive=False) |
|
|
|
|
|
|
|
|
video_output = gr.Gallery(label="Frames traités", visible=False) |
|
|
video_details = gr.Textbox(label="Détails vidéo", visible=False) |
|
|
|
|
|
|
|
|
with gr.Tab("📊 Résultats complets"): |
|
|
|
|
|
with gr.Accordion("🎥 Résultats de la vidéo", open=True): |
|
|
video_results = gr.DataFrame( |
|
|
headers=["Frame", "Véhicule", "Action", "Couleur", "Logo", "Orientation", "Plaque", "Wilaya", "Catégorie", "Année"], |
|
|
datatype=["str", "str", "str", "str", "str", "str", "str", "str", "str", "str"], |
|
|
col_count=(10, "fixed"), |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Accordion("🚗 Caractéristiques du véhicule", open=True): |
|
|
with gr.Row(): |
|
|
color_output = gr.Textbox(label="Couleur") |
|
|
orientation_output = gr.Textbox(label="Orientation") |
|
|
|
|
|
with gr.Row(): |
|
|
logo_output = gr.Textbox(label="Marque détectée") |
|
|
logo_details = gr.Textbox(label="Détails reconnaissance") |
|
|
|
|
|
logo_image = gr.Image(label="Logo détecté", visible=False) |
|
|
|
|
|
with gr.Accordion("🔢 Plaque d'immatriculation", open=False): |
|
|
with gr.Row(): |
|
|
plate_classification = gr.Textbox(label="Classification", lines=5) |
|
|
vehicle_type_output = gr.Textbox(label="Type de véhicule") |
|
|
|
|
|
with gr.Row(): |
|
|
plate_image = gr.Image(label="Plaque détectée") |
|
|
plate_chars_image = gr.Image(label="Caractères détectés") |
|
|
|
|
|
plate_chars_list = gr.Textbox(label="Texte détecté") |
|
|
|
|
|
|
|
|
def update_input_visibility(input_type): |
|
|
return [ |
|
|
gr.Button(visible=input_type == "Vidéo"), |
|
|
gr.Button(visible=input_type == "Vidéo"), |
|
|
gr.Gallery(visible=input_type == "Vidéo"), |
|
|
gr.DataFrame(visible=input_type == "Vidéo"), |
|
|
gr.Image(visible=input_type != "Vidéo"), |
|
|
gr.Textbox(visible=input_type != "Vidéo") |
|
|
] |
|
|
|
|
|
def toggle_time_range(choice): |
|
|
"""Afficher/masquer le champ personnalisé""" |
|
|
if choice == "Personnalisé...": |
|
|
return gr.Textbox(visible=True) |
|
|
return gr.Textbox(visible=False) |
|
|
|
|
|
def verify_vehicle(): |
|
|
"""Vérifier l'existence du véhicule""" |
|
|
if not shared_results.get("trocr_combined_text"): |
|
|
raise gr.Error("Aucune plaque détectée") |
|
|
|
|
|
plate_info = classify_plate(shared_results["trocr_combined_text"]) |
|
|
if not plate_info: |
|
|
raise gr.Error("Plaque non valide") |
|
|
|
|
|
exists, message = check_vehicle(plate_info['matricule_complet']) |
|
|
|
|
|
if exists: |
|
|
allowed = "✅ ACCÈS AUTORISÉ" if is_access_allowed(plate_info['matricule_complet']) else "❌ ACCÈS REFUSÉ" |
|
|
return { |
|
|
access_output: f"{message}\n{allowed}", |
|
|
access_form: gr.update(visible=False), |
|
|
save_btn: gr.update(interactive=False) |
|
|
} |
|
|
else: |
|
|
return { |
|
|
access_output: message, |
|
|
access_form: gr.update(visible=True), |
|
|
save_btn: gr.update(interactive=True) |
|
|
} |
|
|
|
|
|
def save_vehicle_info(status, time_choice, custom_time_input): |
|
|
"""Enregistrer les informations du véhicule""" |
|
|
if not shared_results.get("classified_plate"): |
|
|
raise gr.Error("Aucune information de plaque disponible") |
|
|
|
|
|
plate_info = shared_results["classified_plate"] |
|
|
|
|
|
if time_choice == "Personnalisé...": |
|
|
if not TIME_PATTERN.match(custom_time_input): |
|
|
raise gr.Error("Format horaire invalide. Utilisez HH:MM-HH:MM") |
|
|
time_range = custom_time_input |
|
|
else: |
|
|
time_range = time_choice |
|
|
|
|
|
brand = shared_results.get("vehicle_brand", "Inconnu") |
|
|
model = shared_results.get("vehicle_model", "Inconnu") |
|
|
color = shared_results.get("label_color", "Inconnu") |
|
|
|
|
|
success, message = save_vehicle( |
|
|
plate_info, |
|
|
color, |
|
|
model, |
|
|
brand, |
|
|
status, |
|
|
time_range |
|
|
) |
|
|
|
|
|
if not success: |
|
|
raise gr.Error(message) |
|
|
|
|
|
return { |
|
|
access_output: message, |
|
|
access_form: gr.update(visible=False), |
|
|
save_btn: gr.update(interactive=False) |
|
|
} |
|
|
|
|
|
def process_load(input_type, files): |
|
|
"""Charger un fichier image ou vidéo""" |
|
|
if files is None: |
|
|
raise gr.Error("Veuillez sélectionner un fichier") |
|
|
|
|
|
file_path = files.name if hasattr(files, 'name') else files |
|
|
|
|
|
if input_type == "Image" and not file_path.lower().endswith(('.png', '.jpg', '.jpeg')): |
|
|
raise gr.Error("Veuillez sélectionner une image valide (PNG, JPG, JPEG)") |
|
|
elif input_type == "Vidéo" and not file_path.lower().endswith(('.mp4', '.avi', '.mov')): |
|
|
raise gr.Error("Veuillez sélectionner une vidéo valide (MP4, AVI, MOV)") |
|
|
|
|
|
if input_type == "Vidéo": |
|
|
cap = cv2.VideoCapture(file_path) |
|
|
success, frame = cap.read() |
|
|
cap.release() |
|
|
if not success: |
|
|
raise gr.Error("Échec de lecture de la vidéo") |
|
|
|
|
|
img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) |
|
|
return [ |
|
|
Image.fromarray(img_rgb), |
|
|
"Vidéo chargée - Cliquez sur 'Traiter toute la vidéo' ou analysez frame par frame", |
|
|
gr.Button(visible=True), |
|
|
gr.Button(visible=True) |
|
|
] |
|
|
else: |
|
|
img = cv2.imread(file_path) |
|
|
if img is None: |
|
|
raise gr.Error("Échec de lecture de l'image") |
|
|
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) |
|
|
return [ |
|
|
Image.fromarray(img_rgb), |
|
|
"Image chargée - Cliquez sur les boutons pour analyser", |
|
|
gr.Button(visible=False), |
|
|
gr.Button(visible=False) |
|
|
] |
|
|
|
|
|
def handle_video_processing(video_path): |
|
|
"""Gérer le traitement de la vidéo complète""" |
|
|
frames, best_frame = process_video(video_path) |
|
|
|
|
|
return { |
|
|
video_output: gr.Gallery(value=frames, visible=True), |
|
|
processed_image: best_frame["processed_image"], |
|
|
color_output: f"Couleur: {best_frame['color']}" if best_frame['color'] else "Couleur non détectée", |
|
|
orientation_output: f"Orientation: {best_frame['orientation']}" if best_frame['orientation'] else "Orientation non détectée", |
|
|
logo_output: f"Marque: {best_frame['brand']}" if best_frame['brand'] else "Marque non détectée", |
|
|
model_output: f"Modèle: {best_frame['model']}" if best_frame['model'] else "Modèle non détecté", |
|
|
plate_classification: best_frame["plate_info"] if best_frame["plate_info"] else "Plaque non classifiée", |
|
|
vehicle_type_output: f"Type: {best_frame['vehicle_type']}" if best_frame['vehicle_type'] else "Type non détecté" |
|
|
} |
|
|
|
|
|
|
|
|
input_type.change( |
|
|
fn=update_input_visibility, |
|
|
inputs=input_type, |
|
|
outputs=[next_frame_btn, process_video_btn, video_output, video_results, processed_image, status_output] |
|
|
) |
|
|
|
|
|
time_range.change( |
|
|
fn=toggle_time_range, |
|
|
inputs=time_range, |
|
|
outputs=custom_time |
|
|
) |
|
|
|
|
|
load_btn.click( |
|
|
fn=process_load, |
|
|
inputs=[input_type, file_input], |
|
|
outputs=[original_image, status_output, next_frame_btn, process_video_btn] |
|
|
) |
|
|
|
|
|
process_video_btn.click( |
|
|
fn=handle_video_processing, |
|
|
inputs=file_input, |
|
|
outputs=[video_output, processed_image, color_output, orientation_output, |
|
|
logo_output, logo_details, plate_classification, vehicle_type_output] |
|
|
) |
|
|
|
|
|
check_btn.click( |
|
|
fn=verify_vehicle, |
|
|
outputs=[access_output, access_form, save_btn] |
|
|
) |
|
|
|
|
|
save_btn.click( |
|
|
fn=save_vehicle_info, |
|
|
inputs=[access_status, time_range, custom_time], |
|
|
outputs=[access_output, access_form, save_btn] |
|
|
) |
|
|
|
|
|
|
|
|
detect_vehicle_btn.click( |
|
|
fn=detect_vehicle, |
|
|
outputs=[status_output, processed_image] |
|
|
) |
|
|
|
|
|
detect_color_btn.click( |
|
|
fn=detect_color, |
|
|
outputs=[color_output, processed_image] |
|
|
) |
|
|
|
|
|
detect_orientation_btn.click( |
|
|
fn=detect_orientation, |
|
|
outputs=[orientation_output, processed_image] |
|
|
) |
|
|
|
|
|
detect_logo_btn.click( |
|
|
fn=detect_logo_and_model, |
|
|
outputs=[logo_output, logo_details, logo_details, processed_image, logo_image] |
|
|
) |
|
|
|
|
|
detect_plate_btn.click( |
|
|
fn=detect_plate, |
|
|
outputs=[processed_image, plate_image, plate_chars_image, plate_chars_list] |
|
|
) |
|
|
|
|
|
classify_plate_btn.click( |
|
|
fn=classify_plate_number, |
|
|
outputs=[plate_classification, vehicle_type_output, status_output, access_output] |
|
|
) |
|
|
|
|
|
|
|
|
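# Create the SQLite schema if needed, then launch the app (share=True exposes a public link).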
if __name__ == "__main__": |
|
|
init_database() |
|
|
demo.launch(share=True) |
|
|
|