# Transformers and its models
import transformers

# For Image Processing
from transformers import ViTImageProcessor

# For Model
from transformers import ViTModel, ViTConfig, pipeline

# For image preprocessing (torchvision v2 transforms; importing the v2 namespace
# directly, since `transforms.v2` is not guaranteed to be loaded automatically,
# and dropping the torchvision `datasets` import that was shadowed by the
# Hugging Face `import datasets` below)
from torchvision.transforms import v2 as transforms

# For reproducibility and device placement
from transformers import set_seed
from accelerate import Accelerator

# For Data Loaders
import datasets  # Hugging Face datasets (used here only to set logging verbosity)
from torch.utils.data import Dataset, DataLoader

# For Display
#from tqdm.notebook import tqdm

# Other generic libraries (unused imports removed)
import torch
import torch.nn.functional as F
from PIL import Image
import os
import gc
from glob import glob
import streamlit as st

# Set the device (GPU or CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialise global variables
MODEL_TRANSFORMER = 'google/vit-base-patch16-224'
BATCH_SIZE = 8

# Set Paths
data_path = 'employees'
model_path = 'vit_pytorch_GPU_1.pt'
webcam_path = 'captured_image.jpg'

# Set Title
st.title("Employee Attendance System")
#pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

# Define the image processor
# (attn_implementation and torch_dtype are model kwargs, not processor kwargs, so they are omitted here)
image_processor_prod = ViTImageProcessor.from_pretrained(MODEL_TRANSFORMER)
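
# Example of what the processor produces ('some_face.jpg' is a hypothetical
# file name for illustration):
#   out = image_processor_prod(Image.open('some_face.jpg'), return_tensors='pt')
#   out['pixel_values'].shape  ->  torch.Size([1, 3, 224, 224]) for this checkpoint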

# Define the ML model
class FaceEmbeddingModel(torch.nn.Module):
    def __init__(self, model_name, embedding_size):
        super(FaceEmbeddingModel, self).__init__()
        # NOTE: idx_to_label / label_to_idx are the label mappings built at training
        # time and are not defined in this script. __init__ is never executed here
        # (the full model is unpickled below), but instantiating the class directly
        # would require defining them first.
        self.config = ViTConfig.from_pretrained(model_name, id2label=idx_to_label, label2id=label_to_idx, return_dict=True)
        self.backbone = ViTModel.from_pretrained(model_name, config=self.config)  # Load the pretrained ViT backbone
        self.fc = torch.nn.Linear(self.backbone.config.hidden_size, embedding_size)  # Project to an embedding vector (512-D here)

    def forward(self, images):
        x = self.backbone(images).last_hidden_state[:, 0]  # Take the [CLS] token embedding
        x = self.fc(x)  # Project to the embedding size
        return torch.nn.functional.normalize(x)  # L2-normalize so distances reflect cosine similarity
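
# A minimal sketch of how this class would be instantiated at training time,
# assuming hypothetical label mappings (neither variable is defined in this script):
#   idx_to_label = {0: 'alice', 1: 'bob'}
#   label_to_idx = {v: k for k, v in idx_to_label.items()}
#   model = FaceEmbeddingModel(MODEL_TRANSFORMER, embedding_size=512)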

# Load the trained model (weights_only=False unpickles the full module, which is
# why the FaceEmbeddingModel class definition above must be present)
model_pretrained = torch.load(model_path, map_location=device, weights_only=False)

# Define the ML model - evaluation function
def prod_function(transformer_model, prod_dl, webcam_dl):
    # Initialize accelerator
    accelerator = Accelerator()

    # Set logging verbosity to INFO for the main process only
    if accelerator.is_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # The seed needs to be set before preparing the model, as it determines any random initialization
    set_seed(42)

    # Unpack the objects in the same order they were passed to prepare()
    accelerated_model, accelerated_prod_dl, accelerated_webcam_dl = accelerator.prepare(transformer_model, prod_dl, webcam_dl)

    # Switch to evaluation mode
    accelerated_model.eval()

    # Compute the embedding of the webcam image (the loader holds a single image,
    # so this loop runs once)
    for batch in accelerated_webcam_dl:
        with torch.no_grad():
            emb_prod = accelerated_model(batch['pixel_values'])

    # Compute the distance between each employee embedding and the webcam embedding
    prod_preds = []
    for batch in accelerated_prod_dl:
        with torch.no_grad():
            emb = accelerated_model(batch['pixel_values'])
        distance = F.pairwise_distance(emb, emb_prod)
        prod_preds.append(distance)
    return prod_preds
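
# A minimal sketch of how the returned distances are consumed in the
# "Face Recognition" tab below (names as used there):
#   preds = torch.cat(prod_function(model_pretrained, prod_dl, webcam_dl), 0)
#   best = torch.argmin(preds)     # index of the closest employee image
#   is_match = preds[best] <= 0.3  # distance threshold used by this app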

# Custom Dataset wrapping preprocessed pixel values (consumed by the DataLoaders below)
class CustomDatasetProd(Dataset):
    def __init__(self, pixel_values):
        self.pixel_values = pixel_values

    def __len__(self):
        return len(self.pixel_values)

    def __getitem__(self, idx):
        item = {
            'pixel_values': self.pixel_values[idx].squeeze(0),
        }
        return item
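
# Example: indexing the dataset yields a dict ready for the model; with the
# 224x224 ViT checkpoint used here, prod_ds[0]['pixel_values'] has shape (3, 224, 224).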
        
# Dataset factory: preprocesses images and wraps them in CustomDatasetProd
class CreateDatasetProd():
    def __init__(self, image_processor):
        self.image_processor = image_processor
        # Define a transformation pipeline
        self.transform_prod = transforms.Compose([
                                    transforms.ToImage(),
                                    transforms.ToDtype(torch.uint8, scale=False)
                                ])

    def get_pixels(self, img_paths):
        pixel_values = []
        for path in img_paths:
            # Read and transform the image
            img = Image.open(path)
            img = self.transform_prod(img)

            # Resize/normalize the image to the model's expected input format
            img = self.image_processor(img, return_tensors='pt')

            pixel_values.append(img['pixel_values'].squeeze(0))

            # Force garbage collection
            del img
            gc.collect()
        return pixel_values
        
    def get_pixel(self, img_path):
        # Read and transform the image
        img = Image.open(img_path)
        img = self.transform_prod(img)

        # Resize/normalize the image to the model's expected input format
        img = self.image_processor(img, return_tensors='pt')

        pixel_values = img['pixel_values']

        # Force garbage collection
        del img
        gc.collect()

        return pixel_values
        
    def create_dataset(self, image_paths, webcam=False):
        if webcam:
            pixel_values = self.get_pixel(image_paths)
        else:
            pixel_values = torch.stack(self.get_pixels(image_paths))

        return CustomDatasetProd(pixel_values=pixel_values)
        
# Read employee images from the data directory
image_paths = glob(os.path.join(data_path, '*.jpg'))

# Create DataLoader for Employees image
dataset_prod_obj = CreateDatasetProd(image_processor_prod)
prod_ds = dataset_prod_obj.create_dataset(image_paths, webcam=False)
prod_dl = DataLoader(prod_ds, batch_size=BATCH_SIZE)

## Testing the dataloader
#prod_inputs = next(iter(prod_dl))
#st.write(prod_inputs['pixel_values'].shape) 

about_tab, app_tab = st.tabs(["About the app", "Face Recognition"])
# About the app Tab
with about_tab:
    st.markdown(
        """
        ## Product Description/Objective
        An AI face recognition app for automated employee attendance, tracking attendance accurately and efficiently without manual check-ins. 
        By simply scanning employees' faces upon arrival and departure, the app eliminates the need for traditional timecards or separate biometric devices, reducing errors and fraud. 
        It provides real-time attendance data, enhances workplace security, and streamlines HR processes for greater productivity and accuracy.       
        
        ## How does it work?
        Our app leverages Google's advanced **Vision Transformer (ViT)** architecture, trained on the **LFW (Labeled Faces in the Wild) dataset**, to deliver highly accurate employee attendance tracking through facial recognition. 
        The AI model intelligently extracts distinct facial features and compares them to the stored data of registered employees. When an employee’s face is scanned, the model analyzes the key features, and a confidence score is generated. 
        A high score indicates a match, confirming the employee’s identity and marking their attendance automatically. This seamless, secure process ensures precise tracking while minimizing errors and enhancing workplace efficiency. 
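        Concretely, in this app the comparison is an L2 distance between 512-dimensional face embeddings, so a lower distance means a closer match; a distance of 0.3 or less is treated as a confirmed identity. 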
        
        ### About the architecture
        The Vision Transformer (ViT) is a deep learning architecture for image classification that applies transformer models, originally developed for natural language processing (NLP), to images. 
        ViT divides an image into fixed-size, non-overlapping patches. Each patch is flattened into a 1D vector and linearly embedded into a higher-dimensional space. The patch embeddings are then processed by a standard transformer encoder, 
        which consists of layers of multi-head self-attention and feed-forward networks and can learn global dependencies across the entire image. 
        Vision Transformers can outperform traditional convolutional neural networks (CNNs) on large-scale datasets, especially when given sufficient training data and computational resources.
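        For example, with the `google/vit-base-patch16-224` checkpoint used by this app, a 224×224 input is split into (224/16)² = 196 non-overlapping 16×16 patches, each embedded into a 768-dimensional vector before entering the encoder. 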
        
        ### About the Dataset
        Labeled Faces in the Wild (LFW) is a well-known dataset used primarily for evaluating face recognition algorithms. It is a collection of facial images of public figures gathered from the web.
        LFW contains 13,000+ labeled images of 5,749 different individuals, with images often showing people under varying lighting, poses, and backgrounds.
        It is typically used for face verification and face recognition tasks, where the goal is to determine whether two images show the same person.
        """)

# Face Recognition Tab
with app_tab:
    # Read image from Camera
    enable = st.checkbox("Enable camera")
    picture = st.camera_input("Take a picture", disabled=not enable)
    if picture is not None:
        #img = Image.open(picture)
        #picture.save(webcam_path, "JPEG")
        #st.write('Image saved as:',webcam_path)
    
        ## Create DataLoader for Webcam Image
        webcam_ds = dataset_prod_obj.create_dataset(picture, webcam=True)
        webcam_dl = DataLoader(webcam_ds, batch_size=BATCH_SIZE)
    
        ## Testing the dataloader
        #prod_inputs = next(iter(webcam_dl))
        #st.write(prod_inputs['pixel_values'].shape)
    
        with st.spinner("Wait for it...", show_time=True):
            # Run the predictions
            prediction = prod_function(model_pretrained, prod_dl, webcam_dl)
            predictions = torch.cat(prediction, 0).to(device)
            match_idx = torch.argmin(predictions)
            st.write(predictions)
            st.write(image_paths)
        
            # Display the results
            if predictions[match_idx] <= 0.3:
                st.write('Welcome: ', image_paths[match_idx].split('/')[-1].split('.')[0])
            else:
                st.write("Match not found")