import gradio as gr
from ultralytics import YOLO
import cv2
import numpy as np
from PIL import Image
import torch
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from datetime import datetime, time
from tensorflow.keras.models import load_model
import os
import tempfile

import sqlite3
from sqlite3 import Error
import re

cnn_logo_model = load_model('logo_model_cnn.h5')

train_dir = 'train'
logo_classes = sorted([d for d in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir, d))])
print(f"Classes de logos chargées ({len(logo_classes)}): {logo_classes}")

model_color = YOLO("car_color.pt")
model_orientation = YOLO("direction_best.pt")
model_plate_detection = YOLO("plate_detection.pt")
model_logo_detection = YOLO("car_logo_detection.pt")
model_characters = YOLO("character_detetion.pt")
model_vehicle = YOLO("vehicle_detection.pt")

trocr_model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed")
trocr_processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed")

model_per_brand = {
    'nissan': load_model('nissan_model_final2.keras'),
    'chevrolet': load_model('chevrolet_model_final2.keras'),
}

model_labels = {
    'nissan': ['nissan-altima', 'nissan-armada', 'nissan-datsun', 'nissan-maxima', 'nissan-navara', 'nissan-patrol', 'nissan-sunny'],
    'chevrolet': ['chevrolet-aveo', 'chevrolet-impala', 'chevrolet-malibu', 'chevrolet-silverado', 'chevrolet-tahoe', 'chevrolet-traverse'],
}

CATEGORIES = {
    '1': "Véhicules de tourisme",
    '2': "Camions",
    '3': "Camionnettes",
    '4': "Autocars et autobus",
    '5': "Tracteurs routiers",
    '6': "Autres tracteurs",
    '7': "Véhicules spéciaux",
    '8': "Remorques et semi-remorques",
    '9': "Motocyclettes"
}

WILAYAS = {
    "01": "Adrar", "02": "Chlef", "03": "Laghouat", "04": "Oum El Bouaghi",
    "05": "Batna", "06": "Béjaïa", "07": "Biskra", "08": "Béchar",
    "09": "Blida", "10": "Bouira", "11": "Tamanrasset", "12": "Tébessa",
    "13": "Tlemcen", "14": "Tiaret", "15": "Tizi Ouzou", "16": "Alger",
    "17": "Djelfa", "18": "Jijel", "19": "Sétif", "20": "Saïda",
    "21": "Skikda", "22": "Sidi Bel Abbès", "23": "Annaba", "24": "Guelma",
    "25": "Constantine", "26": "Médéa", "27": "Mostaganem", "28": "M'Sila",
    "29": "Mascara", "30": "Ouargla", "31": "Oran", "32": "El Bayadh",
    "33": "Illizi", "34": "Bordj Bou Arreridj", "35": "Boumerdès",
    "36": "El Tarf", "37": "Tindouf", "38": "Tissemsilt", "39": "El Oued",
    "40": "Khenchela", "41": "Souk Ahras", "42": "Tipaza", "43": "Mila",
    "44": "Aïn Defla", "45": "Naâma", "46": "Aïn Témouchent",
    "47": "Ghardaïa", "48": "Relizane",
    "49": "El M'Ghair", "50": "El Menia",
    "51": "Ouled Djellal", "52": "Bordj Badji Mokhtar",
    "53": "Béni Abbès", "54": "Timimoun",
    "55": "Touggourt", "56": "Djanet",
    "57": "In Salah", "58": "In Guezzam"
}

# Shared state between the different analysis steps
shared_results = {
    "original_image": None,
    "img_rgb": None,
    "img_draw": None,
    "plate_crop_img": None,
    "logo_crop_img": None,
    "plate_with_chars_img": None,
    "trocr_char_list": [],
    "trocr_combined_text": "",
    "classification_result": "",
    "label_color": "",
    "label_orientation": "",
    "vehicle_type": "",
    "vehicle_model": "",
    "vehicle_brand": "",
    "logo_recognition_results": [],
    "current_frame": None,
    "video_path": None,
    "video_processing": False,
    "frame_count": 0,
    "total_frames": 0,
    "original_video_dimensions": None,
    "corrected_orientation": False,
    "vehicle_box": None,
    "vehicle_detected": False,
    "detection_boxes": {
        "plate": None,
        "logo": None,
        "color": None,
        "orientation": None
    }
}


def save_complete_results(plate_info, color, model, orientation, vehicle_type, brand):
    """Append every piece of collected information to resultats.txt."""
    with open("resultats.txt", "a", encoding="utf-8") as f:
        f.write("\n" + "="*60 + "\n")
        f.write(f"ANALYSE EFFECTUÉE LE : {datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\n")
        f.write("="*60 + "\n\n")

        f.write("INFORMATIONS PLAQUE:\n")
        f.write("-"*50 + "\n")
        if plate_info:
            f.write(f"Numéro complet: {plate_info.get('matricule_complet', 'N/A')}\n")
            f.write(f"Wilaya: {plate_info.get('wilaya', ('', 'N/A'))[1]} ({plate_info.get('wilaya', ('', ''))[0]})\n")
            f.write(f"Année: {plate_info.get('annee', 'N/A')}\n")
            f.write(f"Catégorie: {plate_info.get('categorie', ('', 'N/A'))[1]} ({plate_info.get('categorie', ('', ''))[0]})\n")
            f.write(f"Série: {plate_info.get('serie', 'N/A')}\n")
        else:
            f.write("Aucune information de plaque disponible\n")

        f.write("\nCARACTÉRISTIQUES VÉHICULE:\n")
        f.write("-"*50 + "\n")
        f.write(f"Couleur: {color if color else 'Non détectée'}\n")
        f.write(f"Marque: {brand if brand else 'Non détectée'}\n")
        f.write(f"Modèle: {model if model else 'Non détecté'}\n")
        f.write(f"Orientation: {orientation if orientation else 'Non détectée'}\n")
        f.write(f"Type de véhicule: {vehicle_type if vehicle_type else 'Non détecté'}\n")
        f.write("\n" + "="*60 + "\n\n")


def format_vehicle_type(class_name):
    """Format vehicle class names for display."""
    vehicle_types = {
        'car': 'CAR',
        'truck': 'TRUCK',
        'bus': 'BUS',
        'motorcycle': 'MOTORCYCLE',
        'van': 'VAN',
    }
    return vehicle_types.get(class_name.lower(), class_name.upper())


def preprocess_image(image):
    """
    Apply the full preprocessing pipeline to the image:
    1. Colour normalisation with CLAHE
    2. Noise reduction with Non-Local Means denoising
    3. Edge sharpening with a convolution kernel
    """
    try:
        # 1. CLAHE on the L channel of the LAB colour space
        lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)

        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        l_clahe = clahe.apply(l)
        lab_clahe = cv2.merge((l_clahe, a, b))
        img_clahe = cv2.cvtColor(lab_clahe, cv2.COLOR_LAB2BGR)

        # 2. Non-Local Means denoising
        img_denoised = cv2.fastNlMeansDenoisingColored(img_clahe, None, 10, 10, 7, 21)

        # 3. Sharpening filter
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        img_sharpened = cv2.filter2D(img_denoised, -1, kernel)

        return img_sharpened
    except Exception as e:
        print(f"Erreur dans le prétraitement: {str(e)}")
        return image


def is_algerian_plate(text):
    """Return True if the recognised text plausibly matches an Algerian plate number."""
    digits_only = ''.join(c for c in text if c.isdigit())
    if len(digits_only) not in [9, 10, 11]:
        return False
    wilaya_code = digits_only[-2:]
    return wilaya_code.isdigit() and 1 <= int(wilaya_code) <= 58


def classify_plate(text):
    """Full classification of an Algerian plate number."""
    try:
        clean_text = ''.join(c for c in text if c.isalnum()).upper()

        if len(clean_text) < 7 or not is_algerian_plate(clean_text):
            return None

        matricule_complet = clean_text
        position = clean_text[:-5]
        middle = clean_text[-5:-2]
        wilaya_code = clean_text[-2:]

        if not middle.isdigit() or not wilaya_code.isdigit():
            return None

        categorie = middle[0]
        annee = f"20{middle[1:]}" if middle[1:].isdigit() else "Inconnue"
        wilaya = WILAYAS.get(wilaya_code, "Wilaya inconnue")
        vehicle_type = CATEGORIES.get(categorie, "Catégorie inconnue")

        return {
            'matricule_complet': matricule_complet,
            'wilaya': (wilaya_code, wilaya),
            'annee': annee,
            'categorie': (categorie, vehicle_type),
            'serie': position
        }
    except Exception as e:
        print(f"Erreur de classification: {str(e)}")
        return None
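
# Illustrative (hypothetical) example of the split performed by classify_plate,
# assuming the usual Algerian layout "serial + category/year (3 digits) + wilaya (2 digits)":
#   classify_plate("0123411616") would yield
#   serie = "01234", categorie = ('1', "Véhicules de tourisme"),
#   annee = "2016", wilaya = ("16", "Alger").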


def predict_brand(image):
    """Predict the car brand from the full image using the CNN model."""
    try:
        img = Image.fromarray(image).resize((224, 224))
        img_array = np.array(img) / 255.0
        img_array = np.expand_dims(img_array, axis=0)

        predictions = cnn_logo_model.predict(img_array)
        predicted_class = np.argmax(predictions[0])
        confidence = predictions[0][predicted_class]

        if confidence < 0.5:
            return "Marque non détectée (confiance trop faible)"

        brand = logo_classes[predicted_class]
        return f"{brand} (confiance: {confidence:.2f})"
    except Exception as e:
        print(f"Erreur lors de la prédiction de la marque: {str(e)}")
        return "Erreur de détection"


def recognize_logo(cropped_logo):
    """Recognise the brand from a detected logo crop."""
    try:
        if cropped_logo.size == 0:
            return "Logo trop petit pour analyse"

        resized_logo = cv2.resize(np.array(cropped_logo), (128, 128))
        rgb_logo = cv2.cvtColor(resized_logo, cv2.COLOR_BGR2RGB)
        normalized_logo = rgb_logo / 255.0
        input_logo = np.expand_dims(normalized_logo, axis=0)

        predictions = cnn_logo_model.predict(input_logo, verbose=0)
        pred_index = np.argmax(predictions[0])
        pred_label = logo_classes[pred_index]
        pred_conf = predictions[0][pred_index]

        if pred_conf < 0.5:
            return f"Marque incertaine: {pred_label} ({pred_conf:.2f})"

        return f"{pred_label} (confiance: {pred_conf:.2f})"
    except Exception as e:
        print(f"Erreur reconnaissance logo: {str(e)}")
        return "Erreur d'analyse"


def recognize_model(brand, logo_crop):
    """Recognise the specific car model from its logo crop."""
    try:
        if brand.lower() not in model_per_brand:
            return "Modèle non disponible pour cette marque"

        if logo_crop.size == 0:
            return "Image trop petite pour analyse"

        model_recognizer = model_per_brand[brand.lower()]
        model_input_height, model_input_width = model_recognizer.input_shape[1:3]

        resized_model = cv2.resize(np.array(logo_crop), (model_input_width, model_input_height))
        normalized_model = resized_model / 255.0
        input_model = np.expand_dims(normalized_model, axis=0)

        model_predictions = model_recognizer.predict(input_model)
        model_index = np.argmax(model_predictions[0])
        model_name = model_labels[brand.lower()][model_index]

        return model_name
    except Exception as e:
        print(f"Erreur reconnaissance modèle: {str(e)}")
        return "Erreur de détection"


def draw_detection_boxes(image):
    """Draw every detection box on the image."""
    img_draw = image.copy()

    if shared_results["vehicle_box"]:
        x1, y1, x2, y2 = shared_results["vehicle_box"]
        cv2.rectangle(img_draw, (x1, y1), (x2, y2), (0, 165, 255), 2)
        vehicle_type = shared_results.get("vehicle_type", "VEHICLE")
        cv2.putText(img_draw, vehicle_type, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 165, 255), 2)

    if shared_results["detection_boxes"]["plate"]:
        x1, y1, x2, y2 = shared_results["detection_boxes"]["plate"]
        cv2.rectangle(img_draw, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(img_draw, "PLATE", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

    if shared_results["detection_boxes"]["logo"]:
        x1, y1, x2, y2 = shared_results["detection_boxes"]["logo"]
        cv2.rectangle(img_draw, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(img_draw, "LOGO", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

        # The model name is drawn under the logo box only when that box exists,
        # so the coordinates are always defined.
        if shared_results["vehicle_model"]:
            model_text = shared_results["vehicle_model"].split("(")[0].strip() if "(" in shared_results["vehicle_model"] else shared_results["vehicle_model"]
            cv2.putText(img_draw, f"Model: {model_text}", (x1, y2 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

    if shared_results["detection_boxes"]["color"]:
        x1, y1, x2, y2 = shared_results["detection_boxes"]["color"]
        cv2.rectangle(img_draw, (x1, y1), (x2, y2), (0, 0, 255), 2)
        cv2.putText(img_draw, "COLOR", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)

        if shared_results["label_color"]:
            cv2.putText(img_draw, f"{shared_results['label_color']}", (x1, y2 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    if shared_results["detection_boxes"]["orientation"]:
        x1, y1, x2, y2 = shared_results["detection_boxes"]["orientation"]
        cv2.rectangle(img_draw, (x1, y1), (x2, y2), (255, 255, 0), 2)
        cv2.putText(img_draw, "ORIENTATION", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 0), 2)

        if shared_results["label_orientation"]:
            cv2.putText(img_draw, f"{shared_results['label_orientation']}", (x1, y2 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)

    return img_draw


def load_input(input_data):
    """Load an image or a video and prepare the first frame."""
    if isinstance(input_data, str):
        if input_data.lower().endswith(('.png', '.jpg', '.jpeg')):
            return load_image(input_data)
        else:
            return load_video(input_data)
    else:
        return load_image(input_data)


def load_image(image_path):
    """Load and prepare the base image."""
    if isinstance(image_path, str):
        img = cv2.imread(image_path)
    else:
        img = cv2.cvtColor(image_path, cv2.COLOR_RGB2BGR)

    if img is None:
        raise gr.Error("Échec de lecture de l'image")

    img_processed = preprocess_image(img)

    img_rgb = cv2.cvtColor(img_processed, cv2.COLOR_BGR2RGB)
    img_draw = img_rgb.copy()

    shared_results["original_image"] = img
    shared_results["img_rgb"] = img_rgb
    shared_results["img_draw"] = img_draw
    shared_results["video_processing"] = False
    shared_results["corrected_orientation"] = False

    shared_results["detection_boxes"] = {
        "plate": None,
        "logo": None,
        "color": None,
        "orientation": None
    }

    return Image.fromarray(img_rgb)


def load_video(video_path):
    """Load a video and prepare its first frame."""
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise gr.Error("Échec de lecture de la vidéo")

    shared_results["video_path"] = video_path
    shared_results["video_processing"] = True
    shared_results["frame_count"] = 0
    shared_results["total_frames"] = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    shared_results["original_video_dimensions"] = (width, height)

    success, frame = cap.read()
    cap.release()

    if not success:
        raise gr.Error("Échec de lecture du premier frame de la vidéo")

    frame_processed = preprocess_image(frame)

    img_rgb = cv2.cvtColor(frame_processed, cv2.COLOR_BGR2RGB)
    img_draw = img_rgb.copy()

    shared_results["original_image"] = frame
    shared_results["img_rgb"] = img_rgb
    shared_results["img_draw"] = img_draw
    shared_results["current_frame"] = frame_processed
    shared_results["corrected_orientation"] = False

    shared_results["detection_boxes"] = {
        "plate": None,
        "logo": None,
        "color": None,
        "orientation": None
    }

    return Image.fromarray(img_rgb)
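
# NOTE: `extract_video_frames` is called in `process_load` below but is not defined in
# this file. The following is a minimal, assumed sketch: it samples up to `max_frames`
# evenly spaced frames and returns a list of (frame_index, RGB ndarray) tuples, which is
# the shape the Gallery/Slider code below appears to expect.
def extract_video_frames(video_path, max_frames=20):
    """Sample up to `max_frames` evenly spaced frames from a video (assumed helper)."""
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise gr.Error("Échec de lecture de la vidéo")

    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    step = max(1, total // max_frames) if total > 0 else 1

    frames = []
    for pos in range(0, max(total, 1), step):
        cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
        success, frame = cap.read()
        if not success:
            break
        frames.append((pos, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))

    cap.release()
    return frames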


def detect_vehicle():
    """Detect the main vehicle in the image."""
    if shared_results["img_rgb"] is None:
        return "Veuillez d'abord charger une image/vidéo", None, ""

    img_to_process = shared_results["img_rgb"]
    if shared_results.get("corrected_orientation", False):
        height, width = img_to_process.shape[:2]
        if height > width:
            img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE)

    results_vehicle = model_vehicle(img_to_process)
    img_with_boxes = img_to_process.copy()
    vehicle_detected = False
    vehicle_type = ""

    for r in results_vehicle:
        if r.boxes:
            for box in r.boxes:
                x1, y1, x2, y2 = map(int, box.xyxy[0])
                cls = int(box.cls[0])
                vehicle_type = r.names[cls].upper()

                cv2.rectangle(img_with_boxes, (x1, y1), (x2, y2), (0, 165, 255), 2)
                cv2.putText(img_with_boxes, vehicle_type, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 165, 255), 2)

                shared_results["vehicle_box"] = (x1, y1, x2, y2)
                shared_results["vehicle_detected"] = True
                shared_results["vehicle_type"] = vehicle_type
                vehicle_detected = True
                break

    shared_results["img_draw"] = img_with_boxes

    if vehicle_detected:
        return f"{vehicle_type} détecté", Image.fromarray(img_with_boxes), vehicle_type
    else:
        shared_results["vehicle_box"] = None
        shared_results["vehicle_detected"] = False
        return "Aucun véhicule détecté", Image.fromarray(img_with_boxes), ""


def detect_color():
    """Detect the vehicle colour inside the detected vehicle region."""
    if shared_results["img_rgb"] is None:
        return "Veuillez d'abord charger une image/vidéo", None

    img_to_process = shared_results["img_rgb"]
    if shared_results.get("corrected_orientation", False):
        height, width = img_to_process.shape[:2]
        if height > width:
            img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE)

    # Restrict the search to the vehicle ROI when one has been detected
    if shared_results["vehicle_detected"] and shared_results["vehicle_box"]:
        x1, y1, x2, y2 = shared_results["vehicle_box"]
        vehicle_roi = img_to_process[y1:y2, x1:x2]
        results_color = model_color(vehicle_roi)
    else:
        results_color = model_color(img_to_process)

    img_with_boxes = shared_results["img_draw"].copy() if shared_results["img_draw"] is not None else img_to_process.copy()
    color_detected = False

    for r in results_color:
        if hasattr(r, 'boxes') and r.boxes and hasattr(r.boxes, 'cls') and len(r.boxes.cls) > 0:
            cls = int(r.boxes.cls[0])
            shared_results["label_color"] = r.names[cls]

            # Convert ROI-relative coordinates back to absolute image coordinates
            if shared_results["vehicle_detected"] and shared_results["vehicle_box"]:
                vx1, vy1, vx2, vy2 = shared_results["vehicle_box"]
                box = r.boxes.xyxy[0].cpu().numpy()
                x1, y1, x2, y2 = map(int, box)

                abs_x1 = vx1 + x1
                abs_y1 = vy1 + y1
                abs_x2 = vx1 + x2
                abs_y2 = vy1 + y2
                shared_results["detection_boxes"]["color"] = (abs_x1, abs_y1, abs_x2, abs_y2)
            else:
                box = r.boxes.xyxy[0].cpu().numpy()
                x1, y1, x2, y2 = map(int, box)
                shared_results["detection_boxes"]["color"] = (x1, y1, x2, y2)

            color_detected = True

    img_with_boxes = draw_detection_boxes(img_with_boxes)
    shared_results["img_draw"] = img_with_boxes

    if color_detected:
        return f"Couleur: {shared_results['label_color']}", Image.fromarray(img_with_boxes)
    else:
        return "Couleur non détectée", Image.fromarray(img_with_boxes)


def detect_orientation():
    """Detect the vehicle orientation."""
    if shared_results["img_rgb"] is None:
        return "Veuillez d'abord charger une image/vidéo", None

    img_to_process = shared_results["img_rgb"]
    if shared_results["video_processing"]:
        # Some video frames arrive rotated; correct portrait frames once here
        height, width = img_to_process.shape[:2]
        if height > width:
            img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE)
            shared_results["corrected_orientation"] = True

    results_orientation = model_orientation(img_to_process)
    for r in results_orientation:
        if hasattr(r, 'boxes') and r.boxes and hasattr(r.boxes, 'cls') and len(r.boxes.cls) > 0:
            cls = int(r.boxes.cls[0])
            shared_results["label_orientation"] = r.names[cls]

            box = r.boxes.xyxy[0].cpu().numpy()
            x1, y1, x2, y2 = map(int, box)
            shared_results["detection_boxes"]["orientation"] = (x1, y1, x2, y2)

    img_with_boxes = draw_detection_boxes(shared_results["img_rgb"])
    shared_results["img_draw"] = img_with_boxes

    return (f"Orientation: {shared_results['label_orientation']}" if shared_results['label_orientation'] else "Orientation non détectée"), Image.fromarray(img_with_boxes)


def detect_logo_and_model():
    """Detect and recognise the vehicle logo and model."""
    if shared_results["img_rgb"] is None:
        return "Veuillez d'abord charger une image", None, None, None, None

    shared_results["logo_recognition_results"] = []
    img_draw = shared_results["img_draw"].copy()
    detected_model = "Modèle non détecté"

    results_logo = model_logo_detection(shared_results["img_rgb"])
    if results_logo and results_logo[0].boxes:
        for box in results_logo[0].boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            cv2.rectangle(img_draw, (x1, y1), (x2, y2), (255, 0, 0), 2)

            logo_crop = shared_results["img_rgb"][y1:y2, x1:x2]
            shared_results["logo_crop_img"] = Image.fromarray(logo_crop)

            logo_recognition = recognize_logo(shared_results["logo_crop_img"])
            shared_results["logo_recognition_results"].append(logo_recognition)

            cv2.putText(img_draw, "LOGO", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

            if not shared_results["vehicle_brand"] or "confiance" not in shared_results["vehicle_brand"]:
                shared_results["vehicle_brand"] = logo_recognition

            brand = None
            if "(" in logo_recognition:
                brand = logo_recognition.split("(")[0].strip().lower()
            else:
                brand = logo_recognition.lower()

            if brand in model_per_brand:
                try:
                    detected_model = recognize_model(brand, shared_results["logo_crop_img"])

                    cv2.putText(img_draw, f"Modèle: {detected_model}", (x1, y2 + 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
                except Exception as e:
                    print(f"Erreur reconnaissance modèle: {str(e)}")
                    detected_model = "Erreur reconnaissance modèle"

    shared_results["img_draw"] = img_draw
    shared_results["vehicle_model"] = detected_model

    # Fall back to a whole-image brand prediction when the logo-based one is missing or uncertain
    if not shared_results["vehicle_brand"] or "incertaine" in shared_results["vehicle_brand"] or "Erreur" in shared_results["vehicle_brand"]:
        global_brand = predict_brand(shared_results["img_rgb"])
        if global_brand and "non détectée" not in global_brand:
            shared_results["vehicle_brand"] = global_brand

    logo_results_text = " | ".join(shared_results["logo_recognition_results"]) if shared_results["logo_recognition_results"] else "Aucun logo reconnu"

    return (
        f"Marque: {shared_results['vehicle_brand']}" if shared_results['vehicle_brand'] else "Marque non détectée",
        f"Modèle: {shared_results['vehicle_model']}" if shared_results['vehicle_model'] else "Modèle non détecté",
        f"Reconnaissance logo: {logo_results_text}",
        Image.fromarray(img_draw),
        shared_results["logo_crop_img"]
    )


def detect_plate():
    """Detect the licence plate and recognise its characters."""
    if shared_results["img_rgb"] is None:
        return None, None, None, "Veuillez d'abord charger une image/vidéo"

    shared_results["trocr_char_list"] = []
    shared_results["trocr_combined_text"] = ""
    img_to_process = shared_results["img_rgb"]

    if shared_results.get("corrected_orientation", False):
        height, width = img_to_process.shape[:2]
        if height > width:
            img_to_process = cv2.rotate(img_to_process, cv2.ROTATE_90_CLOCKWISE)

    # Search for the plate inside the vehicle ROI when one has been detected
    if shared_results["vehicle_detected"] and shared_results["vehicle_box"]:
        vx1, vy1, vx2, vy2 = shared_results["vehicle_box"]
        roi = img_to_process[vy1:vy2, vx1:vx2]
        results_plate = model_plate_detection(roi)
    else:
        results_plate = model_plate_detection(img_to_process)

    if results_plate and results_plate[0].boxes:
        for box in results_plate[0].boxes:
            # Convert ROI-relative plate coordinates back to absolute coordinates
            if shared_results["vehicle_detected"] and shared_results["vehicle_box"]:
                vx1, vy1, vx2, vy2 = shared_results["vehicle_box"]
                rx1, ry1, rx2, ry2 = map(int, box.xyxy[0])

                x1 = vx1 + rx1
                y1 = vy1 + ry1
                x2 = vx1 + rx2
                y2 = vy1 + ry2
            else:
                x1, y1, x2, y2 = map(int, box.xyxy[0])

            shared_results["detection_boxes"]["plate"] = (x1, y1, x2, y2)
            plate_crop = img_to_process[y1:y2, x1:x2]
            shared_results["plate_crop_img"] = Image.fromarray(plate_crop)
            plate_for_char_draw = plate_crop.copy()

            # Detect the individual characters on the plate and read each one with TrOCR
            results_chars = model_characters(plate_crop)
            char_boxes = []
            for r in results_chars:
                if r.boxes:
                    for char_box in r.boxes:
                        x1c, y1c, x2c, y2c = map(int, char_box.xyxy[0])
                        char_boxes.append(((x1c, y1c, x2c, y2c), x1c))

            # Sort the characters from left to right
            char_boxes.sort(key=lambda x: x[1])

            for i, (coords, _) in enumerate(char_boxes):
                x1c, y1c, x2c, y2c = coords
                char_crop = plate_crop[y1c:y2c, x1c:x2c]
                char_pil = Image.fromarray(char_crop).convert("RGB")

                try:
                    inputs = trocr_processor(images=char_pil, return_tensors="pt").pixel_values
                    generated_ids = trocr_model.generate(inputs)
                    predicted_char = trocr_processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
                except Exception:
                    predicted_char = "?"
                shared_results["trocr_char_list"].append(predicted_char)

                cv2.rectangle(plate_for_char_draw, (x1c, y1c), (x2c, y2c), (255, 0, 255), 1)
                cv2.putText(plate_for_char_draw, predicted_char, (x1c, y1c - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 1)

            shared_results["plate_with_chars_img"] = Image.fromarray(plate_for_char_draw)
            shared_results["trocr_combined_text"] = ''.join(shared_results["trocr_char_list"])
            break

    img_with_boxes = draw_detection_boxes(shared_results["img_rgb"])
    shared_results["img_draw"] = img_with_boxes

    return (
        Image.fromarray(img_with_boxes),
        shared_results["plate_crop_img"],
        shared_results["plate_with_chars_img"],
        shared_results["trocr_char_list"]
    )


def is_empty_plate(cropped_plate_image):
    """Detect whether the plate crop is visually empty (mostly white space)."""
    if cropped_plate_image is None:
        return True

    if isinstance(cropped_plate_image, Image.Image):
        plate_img = np.array(cropped_plate_image)
    else:
        plate_img = cropped_plate_image

    gray = cv2.cvtColor(plate_img, cv2.COLOR_RGB2GRAY)

    # Count pixels that are clearly darker than the white background
    _, thresholded = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
    non_white_pixels = cv2.countNonZero(thresholded)

    total_pixels = gray.shape[0] * gray.shape[1]
    return non_white_pixels < (0.01 * total_pixels)


def classify_plate_number():
    """Classify the detected plate number, but only if it is Algerian."""
    if not shared_results["trocr_combined_text"]:
        return "Aucun texte de plaque à classifier", "", "❌ Aucune plaque détectée", ""

    text = shared_results["trocr_combined_text"]

    if not is_algerian_plate(text):
        return "Plaque non algérienne détectée", "Type non détecté", "❌ Non algérienne", ""

    classified_plate = classify_plate(text)
    if classified_plate:
        shared_results["classified_plate"] = classified_plate

        shared_results["classification_result"] = f"Plaque: {classified_plate['matricule_complet']}\n"
        shared_results["classification_result"] += f"Wilaya: {classified_plate['wilaya'][1]} ({classified_plate['wilaya'][0]})\n"
        shared_results["classification_result"] += f"Année: {classified_plate['annee']}\n"
        shared_results["classification_result"] += f"Catégorie: {classified_plate['categorie'][1]} ({classified_plate['categorie'][0]})\n"
        shared_results["classification_result"] += f"Série: {classified_plate['serie']}\n"

        shared_results["vehicle_type"] = classified_plate['categorie'][1]

        save_complete_results(
            plate_info=classified_plate,
            color=shared_results["label_color"],
            model=shared_results["vehicle_model"],
            orientation=shared_results["label_orientation"],
            vehicle_type=shared_results["vehicle_type"],
            brand=shared_results["vehicle_brand"]
        )

        return (
            shared_results["classification_result"],
            f"Type: {shared_results['vehicle_type']}" if shared_results['vehicle_type'] else "Type non détecté",
            "✅ Plaque algérienne",
            "Classification réussie"
        )
    else:
        return "Impossible de classifier la plaque", "Type non détecté", "❌ Plaque invalide", ""


DB_PATH = "vehicules_database.db"
TIME_PATTERN = re.compile(r'^\d{2}:\d{2}-\d{2}:\d{2}$')
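
# Illustrative note on TIME_PATTERN (assumed intent): it only accepts zero-padded ranges
# such as "08:00-16:00"; an input like "8:00-16:00" would be rejected because both hour
# fields must have exactly two digits.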

def create_connection():
    """Create a connection to the SQLite database."""
    conn = None
    try:
        conn = sqlite3.connect(DB_PATH)
        return conn
    except Error as e:
        print(f"Erreur de connexion à SQLite: {e}")
        return conn


def init_database():
    """Initialise the SQLite database."""
    conn = create_connection()
    if conn is not None:
        try:
            cursor = conn.cursor()
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS vehicules (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    plaque TEXT NOT NULL UNIQUE,
                    marque TEXT,
                    modele TEXT,
                    couleur TEXT,
                    statut TEXT,
                    plage_horaire TEXT,
                    date_enregistrement TEXT
                )
            """)
            conn.commit()
        except Error as e:
            print(f"Erreur création table: {e}")
        finally:
            conn.close()


def check_vehicle(plate_text):
    """Check whether a vehicle already exists in the database."""
    init_database()
    conn = create_connection()
    if conn is not None:
        try:
            cursor = conn.cursor()
            cursor.execute("SELECT statut, plage_horaire FROM vehicules WHERE plaque = ?", (plate_text,))
            result = cursor.fetchone()

            if result:
                return True, f"Statut: {result[0]} | Accès: {result[1]}"
            return False, "Véhicule non enregistré"
        except Error as e:
            print(f"Erreur lecture base: {e}")
            return False, "Erreur base de données"
        finally:
            conn.close()
    return False, "Erreur de connexion"


def save_vehicle(plate_info, color, model, brand, status, time_range):
    """Register a new vehicle."""
    init_database()
    conn = create_connection()
    if conn is not None:
        try:
            plate_number = str(plate_info['matricule_complet']).strip()
            clean_brand = brand.split('(')[0].strip() if '(' in brand else brand
            clean_model = model.split('(')[0].strip() if '(' in model else model

            cursor = conn.cursor()

            cursor.execute("SELECT 1 FROM vehicules WHERE plaque = ?", (plate_number,))
            if cursor.fetchone():
                return False, "Véhicule déjà existant"

            cursor.execute("""
                INSERT INTO vehicules (plaque, marque, modele, couleur, statut, plage_horaire, date_enregistrement)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            """, (
                plate_number,
                clean_brand,
                clean_model,
                color,
                status,
                time_range,
                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            ))

            conn.commit()
            return True, "Enregistrement réussi"
        except Error as e:
            return False, f"Erreur enregistrement: {e}"
        finally:
            conn.close()
    return False, "Erreur de connexion"


def is_access_allowed(plate_text):
    """Check whether access is allowed at the current time."""
    conn = create_connection()
    if conn is not None:
        try:
            cursor = conn.cursor()
            cursor.execute("SELECT statut, plage_horaire FROM vehicules WHERE plaque = ?", (plate_text,))
            vehicle = cursor.fetchone()

            if not vehicle:
                return False

            if vehicle[0] == "Non Autorisé":
                return False

            if vehicle[1] == "24/24":
                return True

            # Compare the current time against the stored "HH:MM-HH:MM" range
            current_time = datetime.now().time()
            start_str, end_str = vehicle[1].split('-')
            start = time(*map(int, start_str.split(':')))
            end = time(*map(int, end_str.split(':')))

            return start <= current_time <= end
        except Error as e:
            print(f"Erreur vérification accès: {e}")
            return False
        finally:
            conn.close()
    return False


with gr.Blocks(title="🚗 Système de Reconnaissance de Véhicules Algériens", theme="soft") as demo:
    gr.Markdown("# 🚗 Système de Reconnaissance de Véhicules Algériens")
    gr.Markdown("Détection de plaque d'immatriculation, logo, couleur, modèle et autres caractéristiques du véhicule")

    with gr.Row():
        with gr.Column():
            input_type = gr.Radio(["Image", "Vidéo"], label="Type d'entrée", value="Image", interactive=True)
            file_input = gr.File(label="Déposer le fichier ici - ou - Cliquez pour télécharger",
                                 file_types=["image", "video"])
            load_btn = gr.Button("Charger le fichier", variant="primary")

            frame_gallery = gr.Gallery(visible=False, label="Sélectionnez un frame", columns=4)
            frame_slider = gr.Slider(visible=False, interactive=True, label="Frame sélectionné")
            load_frame_btn = gr.Button(visible=False, value="Charger le frame sélectionné", variant="secondary")

            with gr.Row():
                detect_vehicle_btn = gr.Button("Détection de véhicule", variant="secondary")
                detect_color_btn = gr.Button("Détection de couleur", variant="secondary")

            with gr.Row():
                detect_orientation_btn = gr.Button("Détection de l'orientation", variant="secondary")
                detect_logo_btn = gr.Button("Logo et modèle", variant="secondary")

            with gr.Row():
                detect_plate_btn = gr.Button("Détection de plaque", variant="secondary")
                classify_plate_btn = gr.Button("Classifier plaque", variant="secondary")

            with gr.Row():
                next_frame_btn = gr.Button("Frame suivant", visible=False, variant="secondary")

            with gr.Tab("Gestion Accès"):
                with gr.Row():
                    check_btn = gr.Button("🔍 Vérifier Véhicule", variant="primary")
                    save_btn = gr.Button("💾 Enregistrer", interactive=False, variant="primary")

                with gr.Row(visible=False) as access_form:
                    with gr.Column():
                        access_status = gr.Radio(
                            ["Autorisé", "Non Autorisé"],
                            label="Statut d'accès"
                        )
                        time_range = gr.Dropdown(
                            ["24/24", "8:00-16:00", "9:00-17:00", "Personnalisé..."],
                            label="Plage horaire"
                        )
                        custom_time = gr.Textbox(
                            visible=False,
                            placeholder="HH:MM-HH:MM",
                            label="Entrez la plage horaire"
                        )
                        save_btn = gr.Button("Confirmer Enregistrement", variant="primary")

                access_output = gr.Textbox(label="Résultat Vérification")

        with gr.Column():
            original_image = gr.Image(label="Image originale")
            processed_image = gr.Image(label="Image annotée")
            status_output = gr.Textbox(label="Statut")

            with gr.Tab("Vehicle"):
                vehicle_type_output = gr.Textbox(label="Type de véhicule")

            with gr.Tab("Color"):
                color_output = gr.Textbox(label="Color detection")

            with gr.Tab("Orientation"):
                orientation_output = gr.Textbox(label="Orientation detection")

            with gr.Tab("Brand & Model"):
                with gr.Column():
                    logo_output = gr.Textbox(label="Brand detection")
                    model_output = gr.Textbox(label="Model recognition")

                    logo_image = gr.Image(label="Detected logo")

            with gr.Tab("Plate"):
                with gr.Column():
                    plate_image = gr.Image(label="Detected Plate")
                    plate_chars_image = gr.Image(label="Plate with characters")
                    plate_chars_list = gr.Textbox(label="Detected characters")

            with gr.Tab("Classification"):
                with gr.Column():
                    plate_classification = gr.Textbox(label="Plate Details")
                    classification_type_output = gr.Textbox(label="Type de véhicule")
                    with gr.Row():
                        algerian_check_output = gr.Textbox(label="Origine", scale=2)
                        action_output = gr.Textbox(label="Action recommandée", scale=3)

    def update_input_visibility(input_type):
        if input_type == "Vidéo":
            return gr.Button(visible=True)
        else:
            return gr.Button(visible=False)

    input_type.change(
        fn=update_input_visibility,
        inputs=input_type,
        outputs=next_frame_btn
    )

    def process_load(input_type, files):
        if files is None:
            raise gr.Error("Veuillez sélectionner un fichier")

        file_path = files.name if hasattr(files, 'name') else files

        if input_type == "Image" and not file_path.lower().endswith(('.png', '.jpg', '.jpeg')):
            raise gr.Error("Veuillez sélectionner une image valide (PNG, JPG, JPEG)")
        elif input_type == "Vidéo" and not file_path.lower().endswith(('.mp4', '.avi', '.mov')):
            raise gr.Error("Veuillez sélectionner une vidéo valide (MP4, AVI, MOV)")

        if input_type == "Image":
            return (
                load_image(file_path),
                "Image chargée - Cliquez sur les boutons pour analyser",
                gr.Button(visible=False),
                gr.Gallery(visible=False),
                gr.Slider(visible=False),
                gr.Button(visible=False)
            )
        else:
            frames = extract_video_frames(file_path)
            shared_results["video_path"] = file_path
            shared_results["video_frames"] = frames

            return (
                None,
                f"Vidéo chargée - {len(frames)} frames extraits",
                gr.Button(visible=True),
                gr.Gallery(visible=True, value=[(img, f"Frame {pos}") for pos, img in frames]),
                gr.Slider(visible=True, maximum=len(frames)-1, value=0, step=1, label="Frame sélectionné"),
                gr.Button(visible=True, value="Charger le frame sélectionné")
            )

    def toggle_time_range(choice):
        """Show or hide the custom time-range field."""
        if choice == "Personnalisé...":
            return gr.Textbox(visible=True)
        return gr.Textbox(visible=False)

    def verify_vehicle():
        """Check whether the detected vehicle exists in the database."""
        if not shared_results["trocr_combined_text"]:
            raise gr.Error("Aucune plaque détectée")

        plate_info = classify_plate(shared_results["trocr_combined_text"])
        if not plate_info:
            raise gr.Error("Plaque non valide")

        exists, message = check_vehicle(plate_info['matricule_complet'])

        if exists:
            allowed = "✅ ACCÈS AUTORISÉ" if is_access_allowed(plate_info['matricule_complet']) else "❌ ACCÈS REFUSÉ"
            return {
                access_output: f"{message}\n{allowed}",
                access_form: gr.update(visible=False),
                save_btn: gr.update(interactive=False)
            }
        else:
            return {
                access_output: message,
                access_form: gr.update(visible=True),
                save_btn: gr.update(interactive=True)
            }

    def save_vehicle_info(status, time_choice, custom_time_input):
        """Save the vehicle information entered in the access form."""
        if not shared_results.get("classified_plate"):
            raise gr.Error("Aucune information de plaque disponible")

        plate_info = shared_results["classified_plate"]

        if time_choice == "Personnalisé...":
            if not TIME_PATTERN.match(custom_time_input):
                raise gr.Error("Format horaire invalide. Utilisez HH:MM-HH:MM")
            time_range = custom_time_input
        else:
            time_range = time_choice

        brand = shared_results.get("vehicle_brand", "Inconnu")
        model = shared_results.get("vehicle_model", "Inconnu")

        success, message = save_vehicle(
            plate_info,
            shared_results.get("label_color", "Inconnu"),
            model,
            brand,
            status,
            time_range
        )

        if not success:
            raise gr.Error(message)

        return {
            access_output: message,
            access_form: gr.update(visible=False),
            save_btn: gr.update(interactive=False)
        }

    load_btn.click(
        fn=process_load,
        inputs=[input_type, file_input],
        outputs=[original_image, status_output, next_frame_btn, frame_gallery, frame_slider, load_frame_btn]
    )
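
    # NOTE: no handler is wired to `load_frame_btn` in this file. The sketch below is an
    # assumption about the intended behaviour: it takes the index chosen with
    # `frame_slider`, pulls the corresponding frame from shared_results["video_frames"]
    # (as filled by `extract_video_frames`), and loads it into the analysis pipeline.
    def load_selected_frame(frame_index):
        frames = shared_results.get("video_frames") or []
        if not frames:
            raise gr.Error("Aucun frame disponible")
        idx = max(0, min(int(frame_index), len(frames) - 1))
        _, frame_rgb = frames[idx]
        # load_image accepts an RGB ndarray and resets the shared detection state
        return load_image(np.asarray(frame_rgb)), f"Frame {idx} chargé"

    load_frame_btn.click(
        fn=load_selected_frame,
        inputs=frame_slider,
        outputs=[original_image, status_output]
    )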

    detect_vehicle_btn.click(
        fn=detect_vehicle,
        outputs=[status_output, processed_image, vehicle_type_output]
    )

    detect_color_btn.click(
        fn=detect_color,
        outputs=[color_output, processed_image]
    )

    detect_orientation_btn.click(
        fn=detect_orientation,
        outputs=[orientation_output, processed_image]
    )

    detect_logo_btn.click(
        fn=detect_logo_and_model,
        outputs=[logo_output, model_output, status_output, processed_image, logo_image]
    )

    detect_plate_btn.click(
        fn=detect_plate,
        outputs=[processed_image, plate_image, plate_chars_image, plate_chars_list]
    )

    classify_plate_btn.click(
        fn=classify_plate_number,
        outputs=[
            plate_classification,
            classification_type_output,
            algerian_check_output,
            action_output
        ]
    )
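
    # NOTE: `next_frame_btn` is shown for videos but has no handler in this file. The
    # sketch below is an assumed implementation: it reads the next frame of the loaded
    # video (tracked through shared_results["frame_count"]), preprocesses it via
    # load_image and makes it the current working image.
    def load_next_frame():
        if not shared_results.get("video_path"):
            raise gr.Error("Aucune vidéo chargée")

        next_index = shared_results.get("frame_count", 0) + 1
        if shared_results.get("total_frames") and next_index >= shared_results["total_frames"]:
            raise gr.Error("Fin de la vidéo atteinte")

        cap = cv2.VideoCapture(shared_results["video_path"])
        cap.set(cv2.CAP_PROP_POS_FRAMES, next_index)
        success, frame = cap.read()
        cap.release()
        if not success:
            raise gr.Error("Impossible de lire le frame suivant")

        shared_results["frame_count"] = next_index
        # Reuse load_image (expects RGB) to reset the detection state for the new frame
        loaded = load_image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        return loaded, f"Frame {next_index}/{shared_results['total_frames']} chargé"

    next_frame_btn.click(
        fn=load_next_frame,
        outputs=[original_image, status_output]
    )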

    time_range.change(
        fn=toggle_time_range,
        inputs=time_range,
        outputs=custom_time
    )

    check_btn.click(
        fn=verify_vehicle,
        outputs=[access_output, access_form, save_btn]
    )

    save_btn.click(
        fn=save_vehicle_info,
        inputs=[access_status, time_range, custom_time],
        outputs=[access_output, access_form, save_btn]
    )


if __name__ == "__main__":
    init_database()
    demo.launch()