@hnguyentt
Last active April 8, 2021 14:13
"""
Generate GradCAM for COVID-Net model: https://github.com/lindawangg/COVID-Net
By: Hoa Nguyen
Last updated: May 23, 2020
"""
# Dependencies
import numpy as np
import tensorflow as tf
import os, argparse
import cv2
import pandas as pd
from tqdm import tqdm
import json
parser = argparse.ArgumentParser(description='COVID-Net Inference')
parser.add_argument('-model', default="COVID-Net-Small.json", help="Path to model specification")
parser.add_argument('-impath', default='./data/test', help='Path to image')
parser.add_argument('-pred_class', help='Prediction result by COVID-Net. Should be: normal | pneumonia | COVID-19')
parser.add_argument('-outdir', help="Output directory")
args = parser.parse_args()
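# Example invocation (script name and paths below are illustrative placeholders,
# not taken from this gist):
#   python gradcam_covidnet.py -model COVID-Net-Small.json \
#       -impath ./data/test/example_xray.png -pred_class COVID-19 -outdir ./results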
model_info = json.load(open(args.model))
weightspath = model_info["weightspath"]
metaname = model_info["metaname"]
ckptname = model_info["ckptname"]
mapping = {'normal': 0, 'pneumonia': 1, 'COVID-19': 2}
inv_mapping = {0: 'normal', 1: 'pneumonia', 2: 'COVID-19'}
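# The model spec JSON is expected to provide the keys read here and in __main__
# (weightspath, metaname, ckptname, input_tensor, output_tensor, final_conv_tensor,
# top_percent, input_size). A sketch of such a file, with purely illustrative values
# that depend on the exported COVID-Net checkpoint:
# {
#     "weightspath": "models/COVIDNet-CXR-Small",
#     "metaname": "model.meta",
#     "ckptname": "model",
#     "input_tensor": "input_1:0",
#     "output_tensor": "norm_dense_1/Softmax:0",
#     "final_conv_tensor": "conv5_block16_concat/concat:0",
#     "top_percent": 0.08,
#     "input_size": 224
# }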
def crop_top(img, percent=0.15):
    offset = int(img.shape[0] * percent)
    return img[offset:]
def central_crop(img):
    size = min(img.shape[0], img.shape[1])
    offset_h = int((img.shape[0] - size) / 2)
    offset_w = int((img.shape[1] - size) / 2)
    return img[offset_h:offset_h + size, offset_w:offset_w + size]
def process_image_file(filepath, top_percent, size):
    if filepath.split(".")[-1] == "npy":
        x_arr = np.load(filepath)
        x_arr = x_arr * 255.0 / x_arr.max()
        img = np.stack((x_arr, x_arr, x_arr), axis=-1)
    else:
        img = cv2.imread(filepath)
    processed_img = crop_top(img, percent=top_percent)
    processed_img = central_crop(processed_img)
    processed_img = cv2.resize(processed_img, (size, size))
    return processed_img, img
class GradCAM:
    def __init__(self, graph, classes, outLayer, targetLayer=None):
        self.graph = graph
        self.classes = classes
        self.targetLayer = targetLayer
        self.outLayer = outLayer
        if self.targetLayer is None:
            self.target = self.find_target_tensor()
        else:
            self.target = self.graph.get_tensor_by_name(self.targetLayer)
    def find_target_tensor(self):
        """
        Find the last tensor with a 4D shape if targetLayer is not specified.
        :return: the last 4D tensor in the graph
        """
        tensor_names = [t.name for op in tf.get_default_graph().get_operations() for t in op.values() if
                        "save" not in str(t.name)]
        for tensor_name in reversed(tensor_names):
            tensor = self.graph.get_tensor_by_name(tensor_name)
            if len(tensor.shape) == 4:
                return tensor
        raise ValueError("Could not find 4D layer. Cannot apply GradCAM")
    def compute_grads(self):
        results = {}  # keys are class indices, values are normalized gradient tensors
        for classIdx in self.classes:
            one_hot = tf.sparse_to_dense(classIdx, [len(self.classes)], 1.0)
            signal = tf.multiply(self.graph.get_tensor_by_name(self.outLayer), one_hot)
            loss = tf.reduce_mean(signal)
            grads = tf.gradients(loss, self.target)[0]
            norm_grads = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))
            results[classIdx] = norm_grads
        return results
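# Grad-CAM recap (Selvaraju et al.): for a class c, each channel weight is the spatial
# mean of the gradient of the class score w.r.t. that channel of the target conv feature
# map, and the heatmap is ReLU(sum_k weight_k * A_k). generate_cam below applies this to
# the normalized gradients produced by compute_grads, rescales the result to [0, 1] and
# upsamples it to the original image size.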
def generate_cam(conv_layer_out, grads_val, upsample_size):
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.zeros(conv_layer_out.shape[0:2], dtype=np.float32)
    # Weighted sum of the feature map channels
    for i, w in enumerate(weights):
        cam += w * conv_layer_out[:, :, i]
    # Apply ReLU
    cam = np.maximum(cam, 0)
    cam = cam / np.max(cam)
    cam = cv2.resize(cam, upsample_size)
    # Convert to 3 channels
    cam3 = np.expand_dims(cam, axis=2)
    cam3 = np.tile(cam3, [1, 1, 3])
    return cam3
if __name__ == '__main__':
    sess = tf.Session()
    tf.get_default_graph()
    saver = tf.train.import_meta_graph(os.path.join(weightspath, metaname))
    saver.restore(sess, os.path.join(weightspath, ckptname))
    graph = tf.get_default_graph()
    image_tensor = graph.get_tensor_by_name(model_info["input_tensor"])
    gradCam = GradCAM(graph=graph, classes=[0, 1, 2], outLayer=model_info["output_tensor"],
                      targetLayer=model_info["final_conv_tensor"])
    grads = gradCam.compute_grads()
    x, origin_im = process_image_file(args.impath, model_info["top_percent"], model_info["input_size"])
    img_arr = np.asanyarray(x)
    size_upsample = (origin_im.shape[1], origin_im.shape[0])  # (w, h)
    x = x.astype('float32') / 255.0
    output, grads_val = sess.run([gradCam.target, grads[mapping[args.pred_class]]],
                                 feed_dict={image_tensor: np.expand_dims(x, axis=0)})
    cam3 = generate_cam(output[0], grads_val[0], size_upsample)
    # Overlay cam on image
    cam3 = np.uint8(255 * cam3)
    cam3 = cv2.applyColorMap(cam3, cv2.COLORMAP_JET)
    new_im = cam3 * 0.3 + origin_im * 0.5
    im_name = os.path.basename(args.impath)
    base_name = os.path.splitext(im_name)[0]
    # Save the GradCAM
    cv2.imwrite(os.path.join(args.outdir, "{}_{}.png".format(args.pred_class, base_name)), new_im)
    print("GradCAM image saved in", args.outdir)
@khanbhai0078

How does this filepath work?
def process_image_file(filepath, top_percent, size):
    if filepath.split(".")[-1] == "npy":
        x_arr = np.load(filepath)
        x_arr = x_arr * 255.0 / x_arr.max()
        img = np.stack((x_arr, x_arr, x_arr), axis=-1)
    else:
        img = cv2.imread(filepath)

@hnguyentt (Author)

How does this filepath work?
def process_image_file(filepath, top_percent, size):
    if filepath.split(".")[-1] == "npy":
        x_arr = np.load(filepath)
        x_arr = x_arr * 255.0 / x_arr.max()
        img = np.stack((x_arr, x_arr, x_arr), axis=-1)
    else:
        img = cv2.imread(filepath)

That is the path to the image file.
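If the path ends in .npy, the file is loaded as a NumPy array, rescaled to the 0-255 range and stacked into 3 channels; otherwise it is read as a regular image with cv2.imread. For example (paths and parameter values here are illustrative only):

processed_img, img = process_image_file("./data/test/example_xray.png", top_percent=0.15, size=224)
processed_img, img = process_image_file("./data/test/example_scan.npy", top_percent=0.15, size=224)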
