TejaCherukuri committed on
Commit f0c1a1a · 1 Parent(s): 9306487

Add the required files

Files changed (36)
  1. app.py +42 -0
  2. gcg/__init__.py +0 -0
  3. gcg/__pycache__/__init__.cpython-310.pyc +0 -0
  4. gcg/__pycache__/config.cpython-310.pyc +0 -0
  5. gcg/__pycache__/exception.cpython-310.pyc +0 -0
  6. gcg/__pycache__/logger.cpython-310.pyc +0 -0
  7. gcg/__pycache__/utils.cpython-310.pyc +0 -0
  8. gcg/components/__init__.py +3 -0
  9. gcg/components/__pycache__/__init__.cpython-310.pyc +0 -0
  10. gcg/components/__pycache__/build_model.cpython-310.pyc +0 -0
  11. gcg/components/__pycache__/data_processor.cpython-310.pyc +0 -0
  12. gcg/components/__pycache__/evaluate_model.cpython-310.pyc +0 -0
  13. gcg/components/__pycache__/grad_cam.cpython-310.pyc +0 -0
  14. gcg/components/__pycache__/gradcam.cpython-310.pyc +0 -0
  15. gcg/components/__pycache__/model.cpython-310.pyc +0 -0
  16. gcg/components/__pycache__/train_model.cpython-310.pyc +0 -0
  17. gcg/components/data_processor.py +82 -0
  18. gcg/components/gradcam.py +189 -0
  19. gcg/components/model.py +323 -0
  20. gcg/config.py +30 -0
  21. gcg/pipelines/__init__.py +1 -0
  22. gcg/pipelines/__pycache__/__init__.cpython-310.pyc +0 -0
  23. gcg/pipelines/__pycache__/inference.cpython-310.pyc +0 -0
  24. gcg/pipelines/evaluate.py +22 -0
  25. gcg/pipelines/inference.py +60 -0
  26. gcg/pipelines/train.py +22 -0
  27. gcg/utils/__init__.py +3 -0
  28. gcg/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  29. gcg/utils/__pycache__/exception.cpython-310.pyc +0 -0
  30. gcg/utils/__pycache__/logger.cpython-310.pyc +0 -0
  31. gcg/utils/__pycache__/utils.cpython-310.pyc +0 -0
  32. gcg/utils/exception.py +21 -0
  33. gcg/utils/logger.py +20 -0
  34. gcg/utils/utils.py +24 -0
  35. requirements.txt +8 -0
  36. setup.py +22 -0
app.py ADDED
@@ -0,0 +1,42 @@
+ import streamlit as st
+ from gcg.pipelines import predict
+ import os
+
+ # Define the directory to save uploaded files
+ TEMP_DIR = "temp"
+ os.makedirs(TEMP_DIR, exist_ok=True)  # Create the temp directory if it doesn't exist
+
+ st.title("Retinal Lesion Detector")
+ st.subheader("Upload retinal images and get predictions with heatmaps")
+
+ # File uploader to accept multiple images
+ uploaded_files = st.file_uploader(
+     "Upload Retinal Images",
+     type=["jpg", "jpeg", "png"],
+     accept_multiple_files=True
+ )
+
+ if st.button("Run Inference"):
+     if uploaded_files:
+         img_paths = []
+         for uploaded_file in uploaded_files:
+             # Save each uploaded file to the temp directory
+             file_path = os.path.join(TEMP_DIR, uploaded_file.name)
+             with open(file_path, "wb") as f:
+                 f.write(uploaded_file.getbuffer())
+             img_paths.append(file_path)  # Collect the file path for inference
+
+         # Pass the file paths to the predict function
+         st.info("Running predictions...")
+         predictions = predict(img_paths)
+
+         # Display predictions and heatmaps
+         st.success("Inference completed! Here are the results:")
+         for img_path, predicted_class in zip(img_paths, predictions):
+             st.write(f"**Image**: {os.path.basename(img_path)}")
+             st.write(f"**Predicted Class**: {predicted_class}")
+             heatmap_path = os.path.join("heatmaps", f"heatmap_{os.path.basename(img_path)}")
+             if os.path.exists(heatmap_path):
+                 st.image(heatmap_path, caption="Attention Map", use_container_width=True)
+     else:
+         st.error("Please upload at least one image.")
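The app can be exercised locally with the standard Streamlit invocation (this assumes the checkpoint saves/gcg.weights.keras and the label encoder saves/labelencoder.pkl referenced by gcg/config.py already exist):

    pip install -r requirements.txt
    streamlit run app.py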
gcg/__init__.py ADDED
File without changes
gcg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (166 Bytes)
gcg/__pycache__/config.cpython-310.pyc ADDED
Binary file (979 Bytes)
gcg/__pycache__/exception.cpython-310.pyc ADDED
Binary file (1.1 kB)
gcg/__pycache__/logger.cpython-310.pyc ADDED
Binary file (633 Bytes)
gcg/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.04 kB)
gcg/components/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .model import build_model, train_model, evaluate_model
+ from .data_processor import preprocess_image, load_data
+ from .gradcam import grad_cam_plus, show_GradCAM
gcg/components/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (408 Bytes)
gcg/components/__pycache__/build_model.cpython-310.pyc ADDED
Binary file (8.77 kB)
gcg/components/__pycache__/data_processor.cpython-310.pyc ADDED
Binary file (2.36 kB)
gcg/components/__pycache__/evaluate_model.cpython-310.pyc ADDED
Binary file (1.39 kB)
gcg/components/__pycache__/grad_cam.cpython-310.pyc ADDED
Binary file (4.78 kB)
gcg/components/__pycache__/gradcam.cpython-310.pyc ADDED
Binary file (4.84 kB)
gcg/components/__pycache__/model.cpython-310.pyc ADDED
Binary file (10.4 kB)
gcg/components/__pycache__/train_model.cpython-310.pyc ADDED
Binary file (919 Bytes)
gcg/components/data_processor.py ADDED
@@ -0,0 +1,82 @@
+ import os
+ import cv2
+ import numpy as np
+ import sys
+ from gcg import config
+ from sklearn.preprocessing import LabelEncoder
+ from tensorflow.keras.utils import to_categorical
+ from sklearn.model_selection import train_test_split
+ from gcg.utils import logging, CustomException, save_object
+
+
+ def preprocess_image(img_path, image_size):
+     # Read the image from the specified path
+     img = cv2.imread(img_path)
+     # Convert the image from BGR to RGB
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+     img_array = cv2.resize(img, (image_size[0], image_size[1]), interpolation=cv2.INTER_CUBIC)
+
+     return img_array
+
+ # Data Loading
+ def load_data(data_path, image_size):
+     try:
+         subfolders = config.labels
+         logging.info("Dataset Loading...")
+
+         img_data_list = []
+         labels_list = []
+         num_images_per_class = []
+
+         for category in subfolders:
+             # Skip annotation folders before listing them
+             if "Annotations" in category:
+                 continue
+             img_list = os.listdir(os.path.join(data_path, category))
+
+             logging.info(f'Loading: {len(img_list)} images of category: {category}')
+             for img in img_list:
+                 # Load an image from this path
+                 img_path = os.path.join(data_path, category, img)
+
+                 # Preprocess image
+                 img_array = preprocess_image(img_path, image_size)
+
+                 img_data_list.append(img_array)
+                 labels_list.append(category)
+             num_images_per_class.append(len(img_list))  # Record once per category, not once per image
+
+         le = LabelEncoder()
+         labels = le.fit_transform(labels_list)
+         labels = to_categorical(labels)
+
+         # Save the label encoder object for use during inference
+         save_object(config.labelencoder_save_path, le)
+
+         data = np.array(img_data_list)
+
+         # Dataset summary
+         logging.info(f"Loaded {data.shape[0]} images with shape ({data.shape[1]}, {data.shape[2]}, {data.shape[3]})")
+
+         logging.info("Initiated train_test_split")
+         X_train, X_test, y_train, y_test = initiate_train_test_split(data, labels)
+
+         return X_train, X_test, y_train, y_test
+
+     except Exception as e:
+         raise CustomException(e, sys)
+
+ # Train Test Split
+ def initiate_train_test_split(data, labels):
+
+     # Split the dataset into two subsets (80%/20%); the first is used for training.
+     X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=195, stratify=labels)
+
+     logging.info(f"X_train has shape: {X_train.shape}")
+     logging.info(f"y_train has shape: {y_train.shape}\n")
+
+     logging.info(f"X_test has shape: {X_test.shape}")
+     logging.info(f"y_test has shape: {y_test.shape}\n")
+
+     logging.info(f"X_train + X_test = {X_train.shape[0] + X_test.shape[0]} samples in total")
+
+     return X_train, X_test, y_train, y_test
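A minimal sketch of how this loader is driven (mirroring gcg/pipelines/train.py below; a dataset directory containing one subfolder per entry of config.labels is assumed):

    from gcg import config
    from gcg.components import load_data

    # Expects data_path to contain one subfolder per label in config.labels
    X_train, X_test, y_train, y_test = load_data(config.data_path, config.image_size)
    print(X_train.shape, y_train.shape)  # e.g. (N, 512, 512, 3) and (N, 7) one-hot labels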
gcg/components/gradcam.py ADDED
@@ -0,0 +1,189 @@
+ import numpy as np
+ import cv2
+ import matplotlib.pyplot as plt
+ import tensorflow as tf
+ from tensorflow.keras.preprocessing import image
+ from tensorflow.keras import Model
+ from gcg.utils import logging
+
+ def grad_cam(model, img,
+              layer_name="block5_conv3", label_name=None,
+              category_id=None):
+     """Get a heatmap by Grad-CAM.
+
+     Args:
+         model: A model object built with tf.keras 2.x.
+         img: An image ndarray.
+         layer_name: A string, name of a layer in the model.
+         label_name: A list of all label names, or None.
+             If given, the predicted label name is printed.
+         category_id: An integer, index of the class.
+             Defaults to the category with the highest score in the prediction.
+
+     Return:
+         A heatmap ndarray (without color).
+     """
+     img_tensor = np.expand_dims(img, axis=0)
+
+     conv_layer = model.get_layer(layer_name)
+     heatmap_model = Model([model.inputs], [conv_layer.output, model.output])
+
+     with tf.GradientTape() as gtape:
+         conv_output, predictions = heatmap_model(img_tensor)
+         if category_id is None:
+             category_id = np.argmax(predictions[0])
+         if label_name is not None:
+             print(label_name[category_id])
+         output = predictions[:, category_id]
+         grads = gtape.gradient(output, conv_output)
+         pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
+
+     heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_output), axis=-1)
+     heatmap = np.maximum(heatmap, 0)
+     max_heat = np.max(heatmap)
+     if max_heat == 0:
+         max_heat = 1e-10
+     heatmap /= max_heat
+
+     return np.squeeze(heatmap)
+
+ def grad_cam_plus(model, img,
+                   layer_name="block5_conv3", label_name=None,
+                   category_id=None):
+     """Get a heatmap by Grad-CAM++.
+
+     Args:
+         model: A model object built with tf.keras 2.x.
+         img: An image ndarray.
+         layer_name: A string, name of a layer in the model.
+         label_name: A list of all label names, or None.
+             If given, the predicted label name is printed.
+         category_id: An integer, index of the class.
+             Defaults to the category with the highest score in the prediction.
+
+     Return:
+         A heatmap ndarray (without color).
+     """
+     img_tensor = np.expand_dims(img, axis=0)
+
+     conv_layer = model.get_layer(layer_name)
+     heatmap_model = Model([model.inputs], [conv_layer.output, model.output])
+
+     # Nested tapes give the first-, second-, and third-order gradients
+     # needed for the Grad-CAM++ pixel-wise weights.
+     with tf.GradientTape() as gtape1:
+         with tf.GradientTape() as gtape2:
+             with tf.GradientTape() as gtape3:
+                 conv_output, predictions = heatmap_model(img_tensor)
+                 if category_id is None:
+                     category_id = np.argmax(predictions[0])
+                 if label_name is not None:
+                     print(label_name[category_id])
+                 output = predictions[:, category_id]
+                 conv_first_grad = gtape3.gradient(output, conv_output)
+             conv_second_grad = gtape2.gradient(conv_first_grad, conv_output)
+         conv_third_grad = gtape1.gradient(conv_second_grad, conv_output)
+
+     global_sum = np.sum(conv_output, axis=(0, 1, 2))
+
+     alpha_num = conv_second_grad[0]
+     alpha_denom = conv_second_grad[0] * 2.0 + conv_third_grad[0] * global_sum
+     alpha_denom = np.where(alpha_denom != 0.0, alpha_denom, 1e-10)
+
+     alphas = alpha_num / alpha_denom
+     alpha_normalization_constant = np.sum(alphas, axis=(0, 1))
+     alphas /= alpha_normalization_constant
+
+     weights = np.maximum(conv_first_grad[0], 0.0)
+
+     deep_linearization_weights = np.sum(weights * alphas, axis=(0, 1))
+     grad_cam_map = np.sum(deep_linearization_weights * conv_output[0], axis=2)
+
+     heatmap = np.maximum(grad_cam_map, 0)
+     max_heat = np.max(heatmap)
+     if max_heat == 0:
+         max_heat = 1e-10
+     heatmap /= max_heat
+
+     return heatmap
+
+
+ def preprocess_image(img_path, image_size=(512, 512, 3)):
+     """Preprocess the image by resizing it and converting it to an array.
+
+     Args:
+         img_path: A string, path to the image.
+         image_size: A tuple, target size for the resize.
+     Return:
+         An image array.
+     """
+     img = image.load_img(img_path, target_size=image_size)
+     img = image.img_to_array(img)
+
+     return img
+
+ def show_GradCAM(img, heatmap, alpha=0.4, save_path=None, return_array=False):
+     """Show the image with its heatmap.
+
+     Args:
+         img: An image ndarray.
+         heatmap: An image array, obtained by calling grad_cam() or grad_cam_plus().
+         alpha: A float, transparency of the heatmap overlay.
+         save_path: A string or None; if given, the combined figure is saved there.
+         return_array: A bool, whether to return the superimposed image array.
+     Return:
+         None or an image array.
+     """
+
+     # Resize the heatmap to match the original image dimensions
+     heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
+
+     # Apply a color map to the heatmap
+     heatmap = (heatmap * 255).astype("uint8")
+     heatmap_colored = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
+
+     # Create the superimposed image
+     superimposed_img = heatmap_colored * alpha + img
+     superimposed_img = np.clip(superimposed_img, 0, 255).astype("uint8")
+
+     # Create the combined plot
+     fig, axes = plt.subplots(1, 3, figsize=(12, 4))
+
+     # Remove space around subplots
+     fig.subplots_adjust(wspace=0, hspace=0)
+
+     # Original image (cast to uint8 so imshow renders float 0-255 arrays correctly)
+     axes[0].imshow(img.astype("uint8"))
+     axes[0].set_title('Original Image')
+     axes[0].axis('off')
+
+     # Heatmap
+     axes[1].imshow(heatmap_colored)
+     axes[1].set_title('Heatmap')
+     axes[1].axis('off')
+
+     # Superimposed image
+     axes[2].imshow(superimposed_img)
+     axes[2].set_title('Superimposed Image')
+     axes[2].axis('off')
+
+     # Adjust layout
+     plt.tight_layout()
+
+     # Save the figure if save_path is provided
+     if save_path:
+         fig.savefig(save_path)
+         logging.info(f"Saved combined visualization to {save_path}")
+     plt.close(fig)  # Close the figure to avoid accumulating open figures across calls
+
+     # Return the superimposed image if return_array is True
+     if return_array:
+         return superimposed_img
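A minimal sketch of the Grad-CAM++ flow above, mirroring how gcg/pipelines/inference.py wires it together (the layer name and paths come from gcg/config.py; an existing checkpoint is assumed):

    from gcg import config
    from gcg.components import build_model, preprocess_image, grad_cam_plus, show_GradCAM
    from gcg.utils import load_from_checkpoint

    model = build_model(config.image_size, config.num_classes)
    model = load_from_checkpoint(model, config.model_path)

    img = preprocess_image(config.test_images[0], config.image_size)
    heatmap = grad_cam_plus(model, img, layer_name=config.gcg_layer_name, label_name=config.labels)
    show_GradCAM(img, heatmap, save_path='heatmaps/heatmap_sample.jpg')  # hypothetical output path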
gcg/components/model.py ADDED
@@ -0,0 +1,323 @@
+ from keras.layers import Conv1D, Conv2D, Conv3D, LayerNormalization, Layer
+ from tensorflow.keras.layers import Reshape, Activation, Softmax, Permute, Add, Dot
+ from keras.optimizers import RMSprop
+ from tensorflow.keras import layers, models, regularizers
+ from tensorflow.keras.applications import EfficientNetV2B0
+ from tensorflow.keras.callbacks import ModelCheckpoint
+ from keras import ops
+ import tensorflow as tf
+ import numpy as np
+ from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, cohen_kappa_score, roc_auc_score, classification_report
+ from gcg import config
+ from gcg.utils import CustomException, logging
+ import sys
+
+ class GlobalContextAttention(tf.keras.layers.Layer):
+     def __init__(self, reduction_ratio=8, transform_activation='linear', **kwargs):
+         """
+         Initializes the GlobalContextAttention layer.
+
+         Args:
+             reduction_ratio (int): Reduces the input filters by this factor for the
+                 bottleneck block of the transform submodule.
+             transform_activation (str): Activation function to apply to the output
+                 of the transform block.
+             **kwargs: Additional keyword arguments for the Layer class.
+         """
+         super(GlobalContextAttention, self).__init__(**kwargs)
+         self.reduction_ratio = reduction_ratio
+         self.transform_activation = transform_activation
+
+     def build(self, input_shape):
+         """
+         Builds the layer by initializing weights and sub-layers.
+
+         Args:
+             input_shape: Shape of the input tensor.
+         """
+         self.channel_dim = 1 if tf.keras.backend.image_data_format() == 'channels_first' else -1
+         self.rank = len(input_shape)
+
+         # Validate input rank
+         if self.rank not in [3, 4, 5]:
+             raise ValueError('Input dimension has to be either 3 (temporal), 4 (spatial), or 5 (spatio-temporal)')
+
+         # Calculate the number of channels
+         self.channels = input_shape[self.channel_dim]
+
+         # Initialize sub-layers
+         self.conv_context = self._convND_layer(1)  # Context modelling block
+         self.conv_transform_bottleneck = self._convND_layer(self.channels // self.reduction_ratio)  # Transform bottleneck
+         self.conv_transform_output = self._convND_layer(self.channels)  # Transform output block
+
+         # Softmax and Dot layers
+         self.softmax = Softmax(axis=self._get_flat_spatial_dim())
+         self.dot = Dot(axes=(1, 1))  # Dot product over the flattened spatial dimensions
+
+         # Activation layers
+         self.activation_relu = Activation('relu')
+         self.activation_transform = Activation(self.transform_activation)
+
+         # Add layer for the final output
+         self.add = Add()
+
+         super(GlobalContextAttention, self).build(input_shape)
+
+     def call(self, inputs):
+         """
+         Performs the forward pass of the layer.
+
+         Args:
+             inputs: Input tensor.
+
+         Returns:
+             Output tensor with global context attention applied.
+         """
+         # Context Modelling Block
+         input_flat = self._spatial_flattenND(inputs)  # [B, spatial_dims, C]
+         context = self.conv_context(inputs)  # [B, spatial_dims, 1]
+         context = self._spatial_flattenND(context)  # [B, spatial_dims, 1]
+         context = self.softmax(context)  # Apply softmax over spatial_dims
+         context = self.dot([input_flat, context])  # [B, C, 1]
+         context = self._spatial_expandND(context)  # [B, C, 1, 1, ...]
+
+         # Transform Block
+         transform = self.conv_transform_bottleneck(context)  # [B, C // R, 1, 1, ...]
+         transform = self.activation_relu(transform)
+         transform = self.conv_transform_output(transform)  # [B, C, 1, 1, ...]
+         transform = self.activation_transform(transform)
+
+         # Apply the context transform
+         out = self.add([inputs, transform])  # [B, spatial_dims, C]
+
+         return out
+
+     def _convND_layer(self, filters):
+         """
+         Creates a Conv1D, Conv2D, or Conv3D layer based on the input rank.
+
+         Args:
+             filters (int): Number of filters for the convolutional layer.
+
+         Returns:
+             A Conv1D, Conv2D, or Conv3D layer.
+         """
+         if self.rank == 3:
+             return Conv1D(filters, kernel_size=1, padding='same', use_bias=False, kernel_initializer='he_normal')
+         elif self.rank == 4:
+             return Conv2D(filters, kernel_size=1, padding='same', use_bias=False, kernel_initializer='he_normal')
+         elif self.rank == 5:
+             return Conv3D(filters, kernel_size=1, padding='same', use_bias=False, kernel_initializer='he_normal')
+
+     def _spatial_flattenND(self, ip):
+         """
+         Flattens the spatial dimensions of the input tensor.
+
+         Args:
+             ip: Input tensor.
+
+         Returns:
+             Flattened tensor.
+         """
+         if self.rank == 3:
+             return ip  # Identity op for rank 3
+         else:
+             shape = (ip.shape[self.channel_dim], -1) if self.channel_dim == 1 else (-1, ip.shape[-1])
+             return Reshape(shape)(ip)
+
+     def _spatial_expandND(self, ip):
+         """
+         Expands the spatial dimensions of the input tensor.
+
+         Args:
+             ip: Input tensor.
+
+         Returns:
+             Expanded tensor.
+         """
+         if self.rank == 3:
+             return Permute((2, 1))(ip)  # Identity op for rank 3
+         else:
+             shape = (-1, *(1 for _ in range(self.rank - 2))) if self.channel_dim == 1 else (*(1 for _ in range(self.rank - 2)), -1)
+             return Reshape(shape)(ip)
+
+     def _get_flat_spatial_dim(self):
+         """
+         Returns the axis for flattening spatial dimensions.
+
+         Returns:
+             Axis for flattening.
+         """
+         return 1 if self.channel_dim == 1 else -1
+
+     def get_config(self):
+         """
+         Returns the configuration of the layer for serialization.
+
+         Returns:
+             A dictionary containing the layer configuration.
+         """
+         config = super(GlobalContextAttention, self).get_config()
+         config.update({
+             'reduction_ratio': self.reduction_ratio,
+             'transform_activation': self.transform_activation,
+         })
+         return config
+
+
+ class AttentionGate(Layer):
+     def __init__(self, filters, **kwargs):
+         self.filters = filters
+         super(AttentionGate, self).__init__(**kwargs)
+
+     def build(self, input_shape):
+         # Create trainable sub-layers for the attention gate
+         self.conv_xl = Conv2D(self.filters, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')
+         self.conv_g = Conv2D(self.filters, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')
+         self.psi = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='linear')
+         self.layer_norm = LayerNormalization(axis=-1)
+
+         # Build the child layers
+         self.conv_xl.build(input_shape[0])  # Build conv_xl with the shape of xl
+         self.conv_g.build(input_shape[1])  # Build conv_g with the shape of g
+         self.psi.build(input_shape[0])  # Build psi with the shape of xl
+         self.layer_norm.build(input_shape[0])  # Build layer_norm with the shape of xl
+
+         # Add trainable bias weights
+         self.bxg = self.add_weight(name='bxg',
+                                    shape=(self.filters,),
+                                    initializer='zeros',
+                                    trainable=True)
+         self.bpsi = self.add_weight(name='bpsi',
+                                     shape=(1,),
+                                     initializer='zeros',
+                                     trainable=True)
+
+         super(AttentionGate, self).build(input_shape)
+
+     def call(self, inputs):
+         xl, g = inputs
+
+         # Apply the convolutional operations
+         xl_conv = self.conv_xl(xl)
+         g_conv = self.conv_g(g)
+
+         # Compute additive attention
+         att = tf.keras.backend.relu(xl_conv + g_conv + self.bxg)
+         att = self.layer_norm(att)  # Add LayerNormalization
+         att = self.psi(att) + self.bpsi
+         att = tf.keras.backend.sigmoid(att)
+
+         # Apply the attention gate
+         x_hat = att * xl
+
+         return x_hat
+
+     def compute_output_shape(self, input_shape):
+         return input_shape[0]
+
+     def get_config(self):
+         config = super(AttentionGate, self).get_config()
+         config.update({'filters': self.filters})
+         return config
+
+
+ class GCRMSprop(RMSprop):
+     def get_gradients(self, loss, params):
+         # Override get_gradients() to return centralized gradients: subtract
+         # from each multi-dimensional gradient its mean over all axes but the
+         # last (Gradient Centralization).
+         grads = []
+         gradients = super().get_gradients(loss, params)
+         for grad in gradients:
+             grad_len = len(grad.shape)
+             if grad_len > 1:
+                 axis = list(range(grad_len - 1))
+                 grad -= ops.mean(grad, axis=axis, keepdims=True)
+             grads.append(grad)
+
+         return grads
+
+ # Build the model
+ def build_model(input_shape, num_classes):
+     try:
+         logging.info("Loading weights of EfficientNetV2B0...")
+         base_model = EfficientNetV2B0(weights='imagenet', include_top=False, input_shape=input_shape)
+
+         fmaps = base_model.output
+
+         logging.info("Initializing Global Context Attention...")
+         context_fmaps = GlobalContextAttention()(fmaps)
+
+         logging.info("Initializing AttentionGate...")
+         att_fmaps = AttentionGate(fmaps.shape[-1])([fmaps, context_fmaps])
+
+         x = layers.GlobalAveragePooling2D()(att_fmaps)
+
+         # First dense layer
+         x = layers.Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.005))(x)
+         x = layers.BatchNormalization()(x)
+         x = layers.Dropout(0.3)(x)
+
+         # Second dense layer
+         x = layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.005))(x)
+         x = layers.BatchNormalization()(x)
+         x = layers.Dropout(0.2)(x)
+
+         output = layers.Dense(num_classes, activation='softmax')(x)
+
+         model = models.Model(inputs=base_model.input, outputs=output)
+         model.compile(optimizer=GCRMSprop(learning_rate=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
+
+         logging.info("Model Built Successfully!")
+
+         return model
+
+     except Exception as e:
+         raise CustomException(e, sys)
+
+ # Train the model
+ def train_model(model, X_train, X_test, y_train, y_test):
+     try:
+         # Define the necessary callbacks
+         checkpoint = ModelCheckpoint(config.model_path, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
+
+         callbacks = [checkpoint]
+
+         logging.info(f"Training network for {config.EPOCHS} epochs...")
+         hist = model.fit(X_train, y_train, batch_size=config.batch_size,
+                          validation_data=(X_test, y_test),
+                          epochs=config.EPOCHS, callbacks=callbacks)
+
+         return hist
+
+     except Exception as e:
+         raise CustomException(e, sys)
+
+ # Evaluate the model
+ def evaluate_model(model, X_test, y_test):
+     try:
+         y_score = model.predict(X_test)
+         y_pred = np.argmax(y_score, axis=-1)
+         Y_test = np.argmax(y_test, axis=-1)
+
+         acc = accuracy_score(Y_test, y_pred)
+         mpre = precision_score(Y_test, y_pred, average='macro')
+         mrecall = recall_score(Y_test, y_pred, average='macro')
+         mf1 = f1_score(Y_test, y_pred, average='macro')
+         kappa = cohen_kappa_score(Y_test, y_pred, weights='quadratic')
+         auc = roc_auc_score(Y_test, y_score, average='macro', multi_class='ovr')
+
+         logging.info(f"Accuracy: {round(acc*100, 2)}")
+         logging.info(f"Macro Precision: {round(mpre*100, 2)}")
+         logging.info(f"Macro Recall: {round(mrecall*100, 2)}")
+         logging.info(f"Macro F1-Score: {round(mf1*100, 2)}")
+         logging.info(f"Quadratic Kappa Score: {round(kappa*100, 2)}")
+         logging.info(f"ROC AUC Score: {round(auc*100, 2)}")
+         logging.info(classification_report(Y_test, y_pred, digits=4))
+
+     except Exception as e:
+         raise CustomException(e, sys)
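For orientation, a short sketch of constructing the network defined above (the ImageNet weights for EfficientNetV2B0 are downloaded on first use):

    from gcg.components import build_model

    model = build_model(input_shape=(512, 512, 3), num_classes=7)
    model.summary()  # EfficientNetV2B0 backbone -> GlobalContextAttention -> AttentionGate -> dense head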
gcg/config.py ADDED
@@ -0,0 +1,30 @@
+ import os
+
+ # Get the absolute path to the root directory (where gcg, test_images, etc. are located)
+ ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+ # Dataset details
+ data_path = "/Users/tejacherukuri/TReNDS/MyResearch/Datasets/Zenodo-DR7"
+ labels = ['No DR', 'Mild NPDR', 'Moderate NPDR', 'Severe NPDR', 'Very Severe NPDR', 'PDR', 'Advanced PDR']
+ image_size = (512, 512, 3)
+ num_classes = 7
+ labelencoder_save_path = os.path.join(ROOT_DIR, 'saves', 'labelencoder.pkl')
+ heatmaps_save_path = os.path.join(ROOT_DIR, 'heatmaps')
+
+ # Training parameters
+ EPOCHS = 100
+ batch_size = 32
+ model_path = os.path.join(ROOT_DIR, 'saves', 'gcg.weights.keras')
+
+ # Attention layer to extract features from
+ gcg_layer_name = 'attention_gate'
+
+ # Inference
+ test_images = [
+     os.path.join(ROOT_DIR, 'test_images', '184_No_DR.jpg'),
+     os.path.join(ROOT_DIR, 'test_images', '198_Moderate_NPDR.jpg'),
+     os.path.join(ROOT_DIR, 'test_images', '440_Severe_NPDR.jpg'),
+     os.path.join(ROOT_DIR, 'test_images', '500_Very_Severe_NPDR.jpg'),
+     os.path.join(ROOT_DIR, 'test_images', '635_PDR.jpg'),
+     os.path.join(ROOT_DIR, 'test_images', '705_Advanced_PDR.jpg')
+ ]
gcg/pipelines/__init__.py ADDED
@@ -0,0 +1 @@
+ from .inference import predict
gcg/pipelines/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (238 Bytes)
gcg/pipelines/__pycache__/inference.cpython-310.pyc ADDED
Binary file (1.95 kB)
gcg/pipelines/evaluate.py ADDED
@@ -0,0 +1,22 @@
+ '''
+ This file is used only for evaluation, without the need to train the model;
+ i.e., it loads the best checkpoint and evaluates it on the test dataset.
+ '''
+
+ from gcg.components import load_data, build_model, evaluate_model
+ from gcg.utils import load_from_checkpoint, logging
+ from gcg import config
+
+ logging.info("Initiated evaluation pipeline")
+
+ logging.info(f"Loading data from {config.data_path}")
+ X_train, X_test, y_train, y_test = load_data(config.data_path, config.image_size)
+
+ logging.info("Building model...")
+ model = build_model(input_shape=config.image_size, num_classes=config.num_classes)
+
+ logging.info("Loading the model checkpoint...")
+ model = load_from_checkpoint(model, config.model_path)
+
+ logging.info("Evaluating the model on test set...")
+ evaluate_model(model, X_test, y_test)
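Since the whole file executes at import time, the evaluation pipeline would be run as a module (assuming the editable install from requirements.txt and an existing checkpoint):

    python -m gcg.pipelines.evaluate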
gcg/pipelines/inference.py ADDED
@@ -0,0 +1,60 @@
+ '''
+ This file is used for inference on new test samples.
+ One sample or multiple samples can be fed to the predict() method.
+ '''
+ import os
+ import sys
+ from typing import List
+ import numpy as np
+ from gcg import config
+ from gcg.utils import logging, CustomException, load_from_checkpoint, load_object
+ from gcg.components import build_model, preprocess_image, grad_cam_plus, show_GradCAM
+
+ def predict(img_paths: List):
+     '''
+     Inputs: List of image paths
+     Output: List of predictions; also generates heatmaps
+     '''
+     try:
+         predictions_list = []
+         # Step 1: Build the model
+         model = build_model(input_shape=config.image_size, num_classes=config.num_classes)
+
+         # Step 2: Load the model from checkpoint
+         logging.info("Loading the model from checkpoint...")
+         model = load_from_checkpoint(model, config.model_path)
+
+         # Step 3: Load the label encoder to decode class indices
+         le = load_object(config.labelencoder_save_path)
+
+         for img_path in img_paths:
+             # Step 4: Read and preprocess the image
+             img_name = os.path.basename(img_path)
+
+             resized_img = preprocess_image(img_path, config.image_size)
+             img_array = np.expand_dims(resized_img, axis=0)
+
+             # Step 5: Run inference on the model
+             logging.info("Getting your prediction...")
+             pred = model.predict(img_array)
+             predicted_class = np.argmax(pred, axis=1)[0]  # Get class index
+
+             logging.info(f"Prediction: {le.classes_[predicted_class]}, Path: {img_name}")
+             predictions_list.append(le.classes_[predicted_class])
+
+             # Step 6: Generate the heatmap for the image
+             logging.info("Generating the heatmap using GradCAM++")
+             heatmap_plus = grad_cam_plus(model, resized_img, config.gcg_layer_name, label_name=config.labels, category_id=predicted_class)
+
+             os.makedirs(config.heatmaps_save_path, exist_ok=True)
+             heatmap_img = os.path.join(config.heatmaps_save_path, f'heatmap_{img_name}')
+             show_GradCAM(resized_img, heatmap_plus, save_path=heatmap_img)
+
+         return predictions_list
+
+     except Exception as e:
+         raise CustomException(e, sys)
+
+ if __name__ == '__main__':
+     predictions_list = predict(config.test_images)
+     print(predictions_list)
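The __main__ guard makes this pipeline directly runnable against the bundled samples (the checkpoint and label encoder under saves/ are assumed to exist):

    python -m gcg.pipelines.inference
    # Prints a list such as ['No DR', 'Moderate NPDR', ...] and writes one
    # heatmaps/heatmap_<image_name> visualization per input image.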
gcg/pipelines/train.py ADDED
@@ -0,0 +1,22 @@
+ '''
+ This file is used for end-to-end training from scratch, followed by evaluation.
+ '''
+
+ from gcg.components import load_data, build_model, train_model, evaluate_model
+ from gcg import config
+ from gcg.utils import logging
+
+ logging.info("Initiated train pipeline")
+
+ logging.info(f"Loading data from {config.data_path}")
+ X_train, X_test, y_train, y_test = load_data(config.data_path, config.image_size)
+
+ logging.info("Building model...")
+ model = build_model(input_shape=config.image_size, num_classes=config.num_classes)
+
+ logging.info("Training the model...")
+ train_model(model, X_train, X_test, y_train, y_test)
+
+ logging.info("Evaluating the model on test set...")
+ evaluate_model(model, X_test, y_test)
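As with the other pipelines, training runs at import time, so (with config.data_path pointing at the dataset) it would be started as:

    python -m gcg.pipelines.train
    # ModelCheckpoint keeps the best val_accuracy weights at saves/gcg.weights.keras.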
gcg/utils/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .exception import CustomException
+ from .logger import logging
+ from .utils import load_from_checkpoint, load_object, save_object
gcg/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (353 Bytes)
gcg/utils/__pycache__/exception.cpython-310.pyc ADDED
Binary file (1.11 kB)
gcg/utils/__pycache__/logger.cpython-310.pyc ADDED
Binary file (639 Bytes)
gcg/utils/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.02 kB)
gcg/utils/exception.py ADDED
@@ -0,0 +1,21 @@
+ import sys
+
+ # Build a detailed error message with the failing file name and line number
+ def error_message_detail(error, error_detail: sys):
+     _, _, exc_tb = error_detail.exc_info()
+     file_name = exc_tb.tb_frame.f_code.co_filename
+     error_message = "Error occurred in python script name [{0}] line number [{1}] error message [{2}]".format(
+         file_name,
+         exc_tb.tb_lineno,
+         str(error)
+     )
+     return error_message
+
+ # CustomException wraps any exception with file/line context
+ class CustomException(Exception):
+     def __init__(self, error_message, error_detail: sys):
+         super().__init__(error_message)
+         self.error_message = error_message_detail(error_message, error_detail=error_detail)
+
+     def __str__(self):
+         return self.error_message
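The intended usage pattern, as seen throughout this commit, is to re-raise any caught exception together with the sys module (a toy division error is used here for illustration):

    import sys
    from gcg.utils import CustomException

    try:
        1 / 0
    except Exception as e:
        raise CustomException(e, sys)  # message carries the failing script name and line number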
gcg/utils/logger.py ADDED
@@ -0,0 +1,20 @@
+ import logging
+ import os
+ from datetime import datetime
+
+ LOG_FILE = f"{datetime.now().strftime('%m_%d_%Y_%H_%M_%S')}.log"
+ logs_path = os.path.join(os.getcwd(), "logs")
+ os.makedirs(logs_path, exist_ok=True)
+
+ # Create the full log file path
+ log_file_path = os.path.join(logs_path, LOG_FILE)
+
+ # Set up logging configuration
+ logging.basicConfig(
+     level=logging.INFO,
+     format="[ %(asctime)s ] - %(filename)s - %(lineno)d - %(name)s - %(levelname)s - %(message)s",
+     handlers=[
+         logging.FileHandler(log_file_path),  # Save logs to a file
+         logging.StreamHandler()  # Display logs in the console
+     ]
+ )
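Importing this module configures the root logger once, so the rest of the package simply re-imports it via gcg.utils and logs:

    from gcg.utils import logging

    logging.info("Goes to the console and to logs/<timestamp>.log")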
gcg/utils/utils.py ADDED
@@ -0,0 +1,24 @@
+ import os
+ import sys
+ import joblib
+ from gcg.utils import logging, CustomException
+
+ def load_from_checkpoint(model, model_path):
+     try:
+         model.load_weights(model_path)
+         return model
+     except Exception as e:
+         raise CustomException(e, sys)
+
+ def save_object(file_path, obj):
+     try:
+         logging.info("Serializing the object as a pickle file")
+         os.makedirs(os.path.dirname(file_path), exist_ok=True)  # Ensure the target directory exists
+         joblib.dump(obj, file_path)
+     except Exception as e:
+         raise CustomException(e, sys)
+
+ def load_object(file_path):
+     try:
+         logging.info("Deserializing the pickle file as an object")
+         return joblib.load(file_path)
+     except Exception as e:
+         raise CustomException(e, sys)
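A quick roundtrip of the joblib helpers (the file path here is hypothetical):

    from gcg.utils import save_object, load_object

    save_object('saves/example.pkl', {'answer': 42})
    assert load_object('saves/example.pkl') == {'answer': 42}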
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ streamlit
+ tensorflow==2.18.0
+ keras==3.8.0
+ opencv-python==4.11.0.86
+ matplotlib==3.10.0
+ numpy==2.0.2
+ scikit-learn==1.6.1
+ -e .
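The trailing '-e .' line makes a plain dependency install also install the gcg package itself in editable mode; setup.py below filters this marker out of install_requires:

    pip install -r requirements.txt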
setup.py ADDED
@@ -0,0 +1,22 @@
+ from setuptools import setup, find_packages
+ from typing import List
+
+ HYPEN_E_DOT = '-e .'
+ def get_requirements(filepath: str) -> List[str]:
+     with open(filepath) as file:
+         requirements = file.readlines()
+         requirements = [req.replace("\n", "") for req in requirements]
+         if HYPEN_E_DOT in requirements:
+             requirements.remove(HYPEN_E_DOT)
+
+     return requirements
+
+ setup(
+     name='Guided Context Gating Attention',
+     version='0.1.0',
+     description='Diabetic Retinopathy Classification based on Attention',
+     author='Teja Cherukuri',
+     author_email='[email protected]',
+     packages=find_packages(),
+     install_requires=get_requirements('requirements.txt')
+ )
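A small sanity check of get_requirements() against the requirements.txt in this commit; it returns the pinned dependencies and drops the '-e .' editable-install marker:

    print(get_requirements('requirements.txt'))
    # ['streamlit', 'tensorflow==2.18.0', 'keras==3.8.0', 'opencv-python==4.11.0.86',
    #  'matplotlib==3.10.0', 'numpy==2.0.2', 'scikit-learn==1.6.1']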