import streamlit as st
from streamlit_webrtc import webrtc_streamer, VideoProcessorBase
import av
from transformers import DetrImageProcessor, DetrForObjectDetection, TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image, ImageDraw
import numpy as np
import torch
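# Dependencies (assumed; install with):
#   pip install streamlit streamlit-webrtc av transformers torch pillow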

# Step 1: Load Models
# Cache the models so Streamlit does not reload them on every script rerun.
@st.cache_resource
def load_models():
    # DETR for object detection
    detr_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    detr_model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

    # TrOCR for text recognition
    trocr_processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-stage1")
    trocr_model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-stage1")
    return detr_processor, detr_model, trocr_processor, trocr_model


detr_processor, detr_model, trocr_processor, trocr_model = load_models()

# Authorized car database for verification
authorized_cars = {"KA01AB1234", "MH12XY5678", "DL8CAF9090", "CH01AG2863"}  # Example data
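# Note (assumption): in a real deployment this set would be loaded from persistent
# storage (file or database), with entries normalized the same way as the OCR output
# (uppercase, no spaces) so that lookups in verify_plate() match reliably.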


# Step 2: Define Helper Functions
def detect_license_plate(frame):
    """
    Detect candidate plate regions in an RGB frame using DETR.
    Returns the detected bounding boxes and the frame as a PIL image.
    """
    pil_image = Image.fromarray(frame)  # expects an RGB array
    inputs = detr_processor(images=pil_image, return_tensors="pt")
    with torch.no_grad():
        outputs = detr_model(**inputs)

    # Post-process into bounding boxes in (x0, y0, x1, y1) pixel coordinates
    target_sizes = torch.tensor([pil_image.size[::-1]])
    results = detr_processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=0.9
    )
    return results[0]["boxes"], pil_image

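# Note (assumption): facebook/detr-resnet-50 is a generic COCO detector and has no
# dedicated "license plate" class, so the boxes above are general objects (car, truck,
# and so on). In practice a DETR checkpoint fine-tuned on plate data would replace it.
# With the stock COCO model, one rough option is to keep only boxes of a chosen class
# and treat them as regions of interest; a minimal, illustrative sketch:
def filter_boxes_by_label(results, target_label="car"):
    """Keep boxes from a post_process_object_detection() dict whose class matches target_label."""
    id2label = detr_model.config.id2label
    return [
        box
        for box, label in zip(results["boxes"], results["labels"])
        if id2label[label.item()] == target_label
    ]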

def recognize_text_from_plate(cropped_plate):
    """
    Recognize text from the cropped license plate image using TrOCR.
    """
    inputs = trocr_processor(images=cropped_plate, return_tensors="pt")
    outputs = trocr_model.generate(**inputs)
    return trocr_processor.batch_decode(outputs, skip_special_tokens=True)[0]


def verify_plate(plate_text):
    """
    Check if the recognized plate text exists in the authorized cars database.
    OCR output is normalized (uppercase, no spaces) before the lookup.
    """
    normalized = plate_text.replace(" ", "").upper()
    if normalized in authorized_cars:
        return f"✅ Access Granted: {normalized}"
    else:
        return f"❌ Access Denied: {normalized}"


# Step 3: Custom Video Processor for WebRTC
class LicensePlateProcessor(VideoProcessorBase):
    """
    Custom video processor to handle video frames in real-time.
    """
    def __init__(self):
        self.latest_results = []  # verification messages, readable from the main thread

    def recv(self, frame: av.VideoFrame):
        rgb_frame = frame.to_ndarray(format="rgb24")  # PIL expects RGB channel order
        boxes, pil_image = detect_license_plate(rgb_frame)
        draw = ImageDraw.Draw(pil_image)

        results = []
        for box in boxes:
            x0, y0, x1, y1 = [int(v) for v in box.tolist()]
            # Crop the detected region and run OCR on it
            cropped_plate = pil_image.crop((x0, y0, x1, y1))
            plate_text = recognize_text_from_plate(cropped_plate)
            results.append(verify_plate(plate_text))

            # Draw bounding box and recognized text on the image
            draw.rectangle((x0, y0, x1, y1), outline="red", width=3)
            draw.text((x0, y0), plate_text, fill="red")

        # recv() runs in a background thread, so Streamlit calls such as st.write() are
        # not reliable here; store the results and let the main script read them instead.
        self.latest_results = results

        return av.VideoFrame.from_ndarray(np.array(pil_image), format="rgb24")


# Step 4: Streamlit Interface
st.title("Real-Time Car Number Plate Recognition")
st.write("This app uses Hugging Face Transformers and WebRTC for real-time processing.")

# Start WebRTC Streamer and keep the context so the processor's results can be read
ctx = webrtc_streamer(
    key="plate-recognition",
    video_processor_factory=LicensePlateProcessor,
    rtc_configuration={
        # STUN server so WebRTC works across networks/NATs
        "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
    },
)
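
# Minimal sketch (assumption): surface the processor's most recent verification results
# in the Streamlit UI. streamlit_webrtc exposes the running processor instance as
# ctx.video_processor; latest_results is the attribute set in recv() above. The script
# only reruns on user interaction, so this shows a snapshot rather than a live feed.
if ctx.video_processor:
    st.subheader("Latest verification results")
    for message in ctx.video_processor.latest_results:
        st.write(message)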