Tarive committed on
Commit 706fb76 · 1 Parent(s): 46d00bc

Upload face_classification.py

Files changed (1)
  1. face_classification.py +219 -0
face_classification.py ADDED
@@ -0,0 +1,219 @@
+ # -*- coding: utf-8 -*-
+ """face-classification.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/#fileId=https%3A//huggingface.co/spaces/Tarive/Nepali_Actors_Prediction/blob/main/face-classification.ipynb
+
+ # Importing Libraries
+ """
+
+ import os
+ import numpy as np  # linear algebra
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ import shutil
+ from PIL import Image
+ from sklearn.metrics import classification_report, confusion_matrix
+ import tensorflow as tf
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, load_img, img_to_array
+ from matplotlib.pyplot import imshow
+ from tensorflow.keras.callbacks import ReduceLROnPlateau
+ from tensorflow.keras.optimizers import RMSprop
+ from tensorflow.keras import Model
+ from tensorflow.keras import layers
+
+ """# Looking into the structure of the file arrangement"""
+
+ DIR = '/kaggle/input/nepali-celeb-localized-face-dataset/Dataset/Dataset/'
+
+ files = os.listdir(DIR)
+ print(files)
+ class_count = len(files)
+ print(f'There are {class_count} classes.')
+
+ # Inspect each class folder: image count and the size of one sample image
+ for cls in files:
+     cls_path = os.path.join(DIR, cls)
+     imgs = os.listdir(cls_path)
+     img = Image.open(os.path.join(cls_path, imgs[0]))
+     print(f'Class {cls} contains {len(imgs)} images of shape {img.size}.')
+
+ """# Creating the data generators using ImageDataGenerator for the CNN"""
+
+ def train_val_generators():
+     """
+     Creates the training and validation data generators.
+     Returns:
+         train_generator, validation_generator: tuple containing the generators
+     """
+     # Instantiate the ImageDataGenerator class, normalize pixel values and set arguments to augment the images
+     datagen = ImageDataGenerator(rescale=1.0/255.0,
+                                  rotation_range=40,
+                                  width_shift_range=0.1,
+                                  height_shift_range=0.1,
+                                  shear_range=0.1,
+                                  zoom_range=0.1,
+                                  horizontal_flip=True,
+                                  vertical_flip=True,
+                                  fill_mode='nearest',
+                                  validation_split=0.2)
+     # Training split, drawn from the same directory via the validation_split argument
+     train_generator = datagen.flow_from_directory(directory=DIR,
+                                                   batch_size=100,
+                                                   class_mode='categorical',
+                                                   shuffle=True,
+                                                   subset='training',
+                                                   target_size=(75, 75))
+
+     # Validation split, kept unshuffled so predictions stay aligned with the generator labels
+     validation_generator = datagen.flow_from_directory(directory=DIR,
+                                                        batch_size=36,
+                                                        class_mode='categorical',
+                                                        shuffle=False,
+                                                        subset='validation',
+                                                        target_size=(75, 75))
+     return train_generator, validation_generator
+
+ train_generator, validation_generator = train_val_generators()
+
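+ # Illustrative sanity check (not in the original notebook): pull one batch to confirm the
+ # generator output, assuming the batch_size and target_size settings above.
+ x_batch, y_batch = next(train_generator)
+ print(x_batch.shape)  # expected (100, 75, 75, 3)
+ print(y_batch.shape)  # expected (100, class_count) one-hot labels
+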
+ """# Define and compile the transfer learning model"""
+
+ pre_trained_model = tf.keras.applications.inception_v3.InceptionV3(
+     input_shape=(75, 75, 3),
+     include_top=False,
+     weights='imagenet')
+
+ # Freeze the pre-trained base so only the new dense head is trained
+ for layer in pre_trained_model.layers:
+     layer.trainable = False
+
+ pre_trained_model.summary()
+
+ # Choose `mixed7` as the last layer of the base model
+ last_layer = pre_trained_model.get_layer('mixed7')
+ print('last layer output shape: ', last_layer.output_shape)
+ last_output = last_layer.output
+
+ # Flatten the output layer to 1 dimension
+ x = layers.Flatten()(last_output)
+ # Add a fully connected layer with 512 hidden units and ReLU activation
+ x = layers.Dense(512, activation='relu')(x)
+ # Add a dropout rate of 0.2
+ x = layers.Dropout(0.2)(x)
+ # Add a final softmax layer for multi-class classification
+ x = layers.Dense(class_count, activation='softmax')(x)
+
+ # Append the dense network to the base model
+ model_transfer = Model(pre_trained_model.input, x)
+
+ # Print the model summary. See the dense network connected at the end.
+ model_transfer.summary()
+
+ model_transfer.compile(optimizer='adam',
+                        loss='categorical_crossentropy',
+                        metrics=['accuracy'])
+
+ """# Creating a Callback class"""
+
+ class myCallback(tf.keras.callbacks.Callback):
+     # Stop training early once validation accuracy passes 99%
+     def on_epoch_end(self, epoch, logs=None):
+         logs = logs or {}
+         if logs.get('val_accuracy') is not None and logs.get('val_accuracy') > 0.99:
+             print(logs.get('val_accuracy'))
+             print("\nReached 99% validation accuracy so cancelling training!")
+             self.model.stop_training = True
+
+ callbacks = myCallback()
+
+ reduce_lr = ReduceLROnPlateau(
+     monitor='val_loss',
+     factor=0.25,
+     patience=2,
+     min_lr=0.00001,
+     verbose=2
+ )
+
+ checkpoint_path = "/kaggle/working/cp.ckpt"
+ checkpoint_dir = os.path.dirname(checkpoint_path)
+
+ # Create a callback that saves the model's weights after every epoch
+ cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
+                                                   save_weights_only=True,
+                                                   verbose=1)
+
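+ # Note (illustrative, not in the original notebook): after training, the checkpointed weights
+ # could be restored into an identically defined model with, e.g.:
+ #   model_transfer.load_weights(checkpoint_path)
+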
+ """# Train the model"""
+
+ history1 = model_transfer.fit(train_generator,
+                               epochs=50,
+                               validation_data=validation_generator,
+                               callbacks=[callbacks, reduce_lr, cp_callback])
+
+ print("Accuracy of the transfer_learning model is - ", model_transfer.evaluate(validation_generator)[1] * 100, "%")
+
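+ # Illustrative (not in the original notebook): persist the full trained model so it can be
+ # reloaded elsewhere. The filename is a placeholder, not taken from the original code.
+ model_transfer.save('/kaggle/working/face_classifier.h5')
+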
+ """# Evaluating Accuracy and Loss for the Model"""
+
+ # Plot the chart for accuracy and loss on both training and validation
+ acc = history1.history['accuracy']
+ val_acc = history1.history['val_accuracy']
+ loss = history1.history['loss']
+ val_loss = history1.history['val_loss']
+
+ epochs = range(len(acc))
+
+ plt.plot(epochs, acc, 'r', label='Training accuracy')
+ plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
+ plt.title('Training and validation accuracy')
+ plt.legend()
+ plt.figure()
+
+ plt.plot(epochs, loss, 'r', label='Training Loss')
+ plt.plot(epochs, val_loss, 'b', label='Validation Loss')
+ plt.title('Training and validation loss')
+ plt.legend()
+
+ plt.show()
+
+ predictions = model_transfer.predict(validation_generator)
+ predictions = np.argmax(predictions, axis=-1)
+ print(predictions[:10])
+ print(validation_generator.labels[:10])
+
+ dict_cls = validation_generator.class_indices
+
+ list(dict_cls.keys())
+
+ """# Evaluating Precision, Recall, F1-Score and Support for the Model"""
+
+ print(classification_report(validation_generator.labels, predictions, target_names=list(dict_cls.keys())))
+
+ """# Plotting the Confusion Matrix for the Classification"""
+
+ cm = confusion_matrix(validation_generator.labels, predictions)
+ cm = pd.DataFrame(cm, index=list(dict_cls.keys()), columns=list(dict_cls.keys()))
+ plt.figure(figsize=(15, 15))
+ sns.heatmap(cm, cmap="Blues", linecolor='black', linewidth=1, annot=True, fmt='')
+
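+ # Illustrative addition (not in the original notebook): per-class accuracy is the confusion
+ # matrix diagonal divided by the number of true samples in each row.
+ per_class_acc = pd.Series(cm.values.diagonal() / cm.values.sum(axis=1), index=cm.index)
+ print(per_class_acc.sort_values())
+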
+ """# Sample Model Prediction"""
+
+ def class_name(id):
+     key_list = list(dict_cls.keys())
+     val_list = list(dict_cls.values())
+     position = val_list.index(id)
+     return key_list[position]
+
+ f, ax = plt.subplots(10, 3)
+ f.set_size_inches(10, 10)
+ k = 0
+ for i in range(10):
+     for j in range(3):
+         true_cls = validation_generator.labels[k]
+         true_cls = class_name(true_cls)
+         pred_cls = predictions[k]
+         pred_cls = class_name(pred_cls)
+         ax[i, j].set_title(f'Actual = {true_cls}\n Predicted = {pred_cls}')
+         img = plt.imread(DIR + validation_generator.filenames[k])
+         ax[i, j].imshow(img)
+         ax[i, j].axis('off')
+         k += 2
+
+ plt.tight_layout()
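+
+ # Illustrative helper (not in the original notebook): classify a single image file the same way
+ # the generators feed the model, i.e. resized to 75x75 and rescaled to [0, 1].
+ def predict_single_image(img_path):
+     img = load_img(img_path, target_size=(75, 75))
+     arr = img_to_array(img) / 255.0
+     probs = model_transfer.predict(np.expand_dims(arr, axis=0))[0]
+     return class_name(np.argmax(probs))
+
+ # Example usage on an image from the validation split:
+ # print(predict_single_image(DIR + validation_generator.filenames[0]))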