import os

import keras
import numpy as np
import pandas as pd
import gradio as gr
# tf and K are referenced by the custom weighted loss defined below
import tensorflow as tf
from keras import backend as K
from keras.applications.densenet import DenseNet121
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
med_labels = ['Cardiomegaly',
              'Emphysema',
              'Effusion',
              'Hernia',
              'Infiltration',
              'Mass',
              'Nodule',
              'Atelectasis',
              'Pneumothorax',
              'Pleural_Thickening',
              'Pneumonia',
              'Fibrosis',
              'Edema',
              'Consolidation']
def get_weighted_loss(pos_weights, neg_weights, epsilon=1e-7):
    """
    Return weighted loss function given negative weights and positive weights.

    Args:
        pos_weights (np.array): array of positive weights for each class, size (num_classes)
        neg_weights (np.array): array of negative weights for each class, size (num_classes)

    Returns:
        weighted_loss (function): weighted loss function
    """
    def weighted_loss(y_true, y_pred):
        """
        Return weighted loss value.

        Args:
            y_true (Tensor): Tensor of true labels, size is (num_examples, num_classes)
            y_pred (Tensor): Tensor of predicted labels, size is (num_examples, num_classes)

        Returns:
            loss (float): overall scalar loss summed across all classes
        """
        # initialize loss to zero
        loss = 0.0
        for i in range(len(pos_weights)):
            positive_term_loss = pos_weights[i] * tf.cast(y_true[:, i], tf.float32) * K.log(y_pred[:, i] + epsilon)
            negative_term_loss = neg_weights[i] * tf.cast((1 - y_true[:, i]), tf.float32) * K.log(1 - y_pred[:, i] + epsilon)
            loss += -K.mean(positive_term_loss + negative_term_loss)
        return loss
    return weighted_loss
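# A minimal sanity-check sketch of the weighted loss (commented out; the toy
# numbers below are hypothetical, not taken from the dataset). With a positive
# frequency of 0.2 for a single class, weighting the positive term by 0.8 and
# the negative term by 0.2 makes both label values contribute comparably:
# toy_loss = get_weighted_loss(np.array([0.8]), np.array([0.2]))
# y_true = tf.constant([[1.0], [0.0]])
# y_pred = tf.constant([[0.7], [0.7]])
# print(float(toy_loss(y_true, y_pred)))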
freq_neg = np.loadtxt('freq_neg.txt')
freq_pos = np.loadtxt('freq_pos.txt')
# Weight each class's positive term by the negative-label frequency and vice
# versa, so that rare positive findings are not drowned out by the far more
# numerous negative examples.
pos_weights = freq_neg
neg_weights = freq_pos
# create the base pre-trained model
base_model = DenseNet121(weights='./nih/densenet.hdf5', include_top=False)

x = base_model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(x)
# and a logistic layer with one sigmoid output per finding
predictions = Dense(len(med_labels), activation="sigmoid")(x)

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss=get_weighted_loss(pos_weights, neg_weights))
model.load_weights("./nih/pretrained_model.h5")
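# Note: only the weights are loaded into the freshly built graph above, so the
# custom loss never needs to be deserialized. If the full model were instead
# reloaded elsewhere with keras.models.load_model, the loss would have to be
# supplied via custom_objects, roughly (a sketch, the path is hypothetical):
# keras.models.load_model('model.h5',
#                         custom_objects={'weighted_loss': get_weighted_loss(pos_weights, neg_weights)})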
from tensorflow import keras
from IPython.display import Image, display
import matplotlib.cm as cm
def convert_preds(preds):
    # map the (1, num_classes) prediction array onto {label: score}
    q = dict(zip(med_labels, preds[0]))
    return q
# The Grad-CAM algorithm
def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
    # First, we create a model that maps the input image to the activations
    # of the last conv layer as well as the output predictions
    grad_model = keras.models.Model(
        model.inputs, [model.get_layer(last_conv_layer_name).output, model.output]
    )

    # Then, we compute the gradient of the top predicted class for our input image
    # with respect to the activations of the last conv layer
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]

    # This is the gradient of the output neuron (top predicted or chosen)
    # with regard to the output feature map of the last conv layer
    grads = tape.gradient(class_channel, last_conv_layer_output)

    # This is a vector where each entry is the mean intensity of the gradient
    # over a specific feature map channel
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # We multiply each channel in the feature map array
    # by "how important this channel is" with regard to the top predicted class,
    # then sum all the channels to obtain the heatmap class activation
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    # For visualization purposes, we also normalize the heatmap between 0 & 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()
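# Usage sketch for the raw heatmap (commented out; 'sample.png' is a hypothetical
# path and the preprocessing helpers are defined later in this file). For a
# 320x320 input, DenseNet121 downsamples by a factor of 32, so the heatmap is
# 10x10 before it is resized over the original image:
# sample = normalize_array(load_image_to_array('nih/images-small/sample.png'))
# hm = make_gradcam_heatmap(sample, model, last_conv_layer_name,
#                           pred_index=med_labels.index('Cardiomegaly'))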
# Create a superimposed visualization
def superimpose_gradcam(img_path, heatmap, alpha=0.5):
    # Load the original image
    img = keras.utils.load_img(img_path)
    img = keras.utils.img_to_array(img)

    # Rescale heatmap to a range 0-255
    heatmap = np.uint8(255 * heatmap)

    # Use jet colormap to colorize heatmap
    jet = cm.get_cmap("jet")

    # Use RGB values of the colormap
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    # Create an image with RGB colorized heatmap
    jet_heatmap = keras.utils.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = keras.utils.img_to_array(jet_heatmap)

    # Superimpose the heatmap on the original image
    superimposed_img = jet_heatmap * alpha + img * 0.4
    superimposed_img = keras.utils.array_to_img(superimposed_img)
    return superimposed_img

# Save the superimposed image
# superimposed_img.save(cam_path)
# # Display Grad CAM
# display(Image(cam_path, width=300))
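# Overlay sketch (commented out; reuses the hypothetical 'hm' and path from the
# heatmap sketch above): the jet-colored heatmap is resized to the X-ray's
# resolution and alpha-blended on top of it.
# overlay = superimpose_gradcam('nih/images-small/sample.png', hm, alpha=0.5)
# overlay.save('sample_gradcam.png')  # or display(overlay) in a notebook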
def pil_to_np(pil):
    a = np.array(pil)
    return a

def np_to_pil(a):
    from PIL import Image
    im = Image.fromarray(a)  # , mode="RGB"
    return im
from keras.preprocessing import image

def load_image_to_array(image_path, H=320, W=320):
    pil = image.load_img(
        image_path,
        target_size=(H, W),
        color_mode='rgb',
        interpolation='nearest',
    )
    a = pil_to_np(pil)
    return a
def normalize_array(a):
    # standardize to zero mean and unit variance, working directly on the
    # NumPy array instead of round-tripping through PIL, then add a batch axis
    a = a.astype('float64')
    a -= np.mean(a)
    a /= np.std(a)
    a2 = np.expand_dims(a, axis=0)
    return a2
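# Preprocessing shape check (commented out; the path is hypothetical). The model
# expects a batch of standardized 320x320 RGB images:
# raw = load_image_to_array('nih/images-small/sample.png')   # (320, 320, 3), uint8
# batch = normalize_array(raw)                                # (1, 320, 320, 3), zero mean / unit std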
selected_keys = ['Cardiomegaly', 'Mass', 'Pneumothorax', 'Edema']
# selected_keys.append('Infiltration')

def print_selected(preds):
    for k in selected_keys:
        print('{:15}\t{:6.3f}'.format(k, preds[k]))

IMAGE_DIR = "nih/images-small/"
last_conv_layer_name = 'bn'
def med_classify_image(inp):
    inp = load_image_to_array(inp)
    inp = normalize_array(inp)
    preds = model.predict(inp, verbose=0)
    preds = convert_preds(preds)
    preds = {key: value.item() for key, value in preds.items()}
    return preds
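# med_classify_image returns a plain {label: probability} dict over all 14
# findings; the gr.Label component below then shows the five highest scores.
# Illustrative (hypothetical) output:
# {'Cardiomegaly': 0.82, 'Effusion': 0.41, 'Mass': 0.07, ...}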
def gradcam(inp):
    selected_labels = [
        (idx, label)
        for idx, label in enumerate(med_labels)
        if label in selected_keys
    ]
    img_array = load_image_to_array(inp)
    img_array = normalize_array(img_array)
    images = []
    for k, l in selected_labels:
        heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=k)
        superimposed_img = superimpose_gradcam(inp, heatmap)
        images.append((superimposed_img, l))
    return images
with gr.Blocks() as demo:
    gr.Markdown('# Chest X-Ray Medical Diagnosis with Deep Learning')
    with gr.Row():
        input_image = gr.Image(label='Chest X-Ray', type='filepath', image_mode='L')
        with gr.Column():
            gr.Examples(
                examples=[
                    "nih/images-small/00008270_015.png",
                    "nih/images-small/00011355_002.png",
                    "nih/images-small/00029855_001.png",
                    "nih/images-small/00005410_000.png",
                ],
                inputs=input_image,
                label='Examples'
                # fn=mirror,
                # cache_examples=True,
            )
        with gr.Column():
            b1 = gr.Button("Classify")
            b2 = gr.Button("Compute GradCam")
    with gr.Row():
        label = gr.Label(label='Classification', num_top_classes=5)
        gallery = gr.Gallery(
            label="GradCam",
            show_label=True,
            elem_id="gallery",
            object_fit="scale-down",
            height=400)
    gr.Markdown(
        """
        [ChestX-ray8 dataset](https://arxiv.org/abs/1705.02315)
        [Download the entire dataset](https://nihcc.app.box.com/v/ChestXray-NIHCC)
        """)
    b1.click(med_classify_image, inputs=input_image, outputs=label)
    b2.click(gradcam, inputs=input_image, outputs=gallery)

if __name__ == "__main__":
    demo.launch()