# TODO: decide how the output directory is created and how pages are split (bounding boxes?).
# Some basic setup
# Setup detectron2 logger
from sys import argv
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import matplotlib.pyplot as plt
import numpy as np
#from google.cloud import storage
from io import BytesIO
import cv2
from glob import glob
import subprocess
from shlex import quote
import csv
from tqdm import tqdm
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode #I added this
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
import statistics
import random
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
import os
import traceback
numdir = argv[1]
album = argv[2]
# Set Up Models
# the cfg object here is an instantiation of the model. The merge_from_file function gets arguments from a default YAML
# file to configure the model. The functions that follow update certain arguments that were set to default from the YAML file.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")) # define model
cfg.MODEL.WEIGHTS = r"C:\Users\Chase\OneDrive\Documents\service-project\mexico_5_column_weights.pth" # SET UP WEIGHTS HERE
cfg.MODEL.DEVICE = 'cpu'
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 5 # 5 classes (5 columns in this instance, but you may have more depending on what you are doing)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8 # set the testing threshold for this model (confidence threshold)
predictor = DefaultPredictor(cfg)
cfg2 = get_cfg()
cfg2.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg2.MODEL.WEIGHTS = r"C:\Users\Chase\OneDrive\Documents\service-project\mexico_5_column_weights.pth" # SET UP WEIGHTS HERE
cfg2.MODEL.DEVICE = 'cpu'
cfg2.MODEL.ROI_HEADS.NUM_CLASSES = 1 # 1 class (Cause of Death in this instance, but you may have more depending on what you are doing)
cfg2.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8 # set the testing threshold for this model
predictor2 = DefaultPredictor(cfg2)
#FUNCTIONS
#This function returns a list of vertical lines found within the image passed to the function.
def get_vertical_lines(img, width=385, line_height=2000, circle = 155): #this function takes as parameter an image and default integers. It returns a list.
'''This function takes an image and default integers as parameters and outputs a list.'''
ys=[]
keepers=[]
n=0
# convert between RGB/BGR and grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# use an Adaptive Thresholding approach where the threshold value = Gaussian weighted sum of the neighborhood values - constant value.
# In other words, it is a weighted sum of the blockSize^2 neighborhood of a point minus the constant.
# in this example, we are setting the maximum threshold value as 255 with the block size of 155 (as set in the "circle" parameter) and the
# constant is 2 (as specified in the last argument)
edges = ~cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,circle,2)
# create a 3x3 matrix of ones.
# An image kernel is a small matrix used to apply effects like the ones you might find in Photoshop or Gimp, such as blurring, sharpening, outlining or embossing. They're also used in machine learning for 'feature extraction', a technique for determining the most important portions of an image.
kernel = np.ones((3, 3), np.uint8)
# The basic idea of erosion is just like soil erosion only, it erodes away the boundaries of foreground object (Always try to keep foreground in white). It is normally performed on binary images. It needs two inputs, one is our original image, second one is called structuring element or kernel which decides the nature of operation. A pixel in the original image (either 1 or 0) will be considered 1 only if all the pixels under the kernel is 1, otherwise it is eroded (made to zero).
th2 = cv2.erode(edges, kernel, iterations=1)
# create a 1x7 matrix of ones.
kernel = np.ones((1, 7), np.uint8)
# The basic idea of dilation is accentuating the features of the images. Whereas erosion is used to reduce the amount of noise in the image, dilation is used to enhance the features of the image.
th3 = cv2.dilate(th2, kernel, iterations=1)
# The Hough Transform is a method that is used in image processing to detect any shape, if that shape can be represented in mathematical form. It can detect the shape even if it is broken or distorted a little bit.
# Any line can be represented in these two terms, (r, ?). Let rows denote the r and columns denote the (?) theta.
# First parameter, Input image should be a binary image, so apply threshold edge detection before finding applying hough transform. In this instance, the "th3" variable will represent our edges.
# Second and third parameters are r and ?(theta) accuracies respectively.
lines = cv2.HoughLines(th3,1,np.pi/180, line_height)
for line in range(len(lines)):
if lines[line][0][1]>-.1 and lines[line][0][1]<.1:
keepers.append(lines[line])
n+=1
for line2 in range(n):
for rho,theta in keepers[line2]:
b = np.sin(theta)
y0 = b*rho
a = np.cos(theta)
x0 = a*rho
x1 = int(x0 + 30*(-b))
y1 = int(y0 + 30*(a))
x2 = int(x0 - 30*(-b))
y2 = int(y0 - 30*(a))
slope = (y2-y1) / (x2-x1)
intercept = y1 - (slope * x1)
side = slope * width + intercept
ys.append(intercept)
ys.append(side)
return ys
#This function returns a list of horizontal lines found in the image passed into the function.
def get_horizontal_lines(img, width=385, line_width=150, circle = 155): #this function takes as parameter and image and default integers. It returns a list.
ys=[]
keepers=[]
n=0
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #converts image to grayscale
edges = ~cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,circle,2)#applies threshold on image
kernel = np.ones((3, 3), np.uint8)
th2 = cv2.erode(edges, kernel, iterations=1)
kernel = np.ones((7, 1), np.uint8)
th3 = cv2.dilate(th2, kernel, iterations=1)
lines = cv2.HoughLines(th3,1,np.pi/180, line_width)
for line in range(len(lines)):
if lines[line][0][1]>1.45 and lines[line][0][1]<1.7:
keepers.append(lines[line])
n+=1
for line2 in range(n):
for rho,theta in keepers[line2]:
b = np.sin(theta)
y0 = b*rho
a = np.cos(theta)
x0 = a*rho
x1 = int(x0 + 30*(-b))
y1 = int(y0 + 30*(a))
x2 = int(x0 - 30*(-b))
y2 = int(y0 - 30*(a))
slope = (y2-y1) / (x2-x1)
intercept = y1 - (slope * x1)
side = slope * width + intercept
ys.append(intercept)
ys.append(side)
return ys
def crop_bot(img, width = 385, line_width_crop = 300):
temp=img[-50:,0:width]
try:
ys = get_horizontal_lines(temp, line_width = line_width_crop)
return img[:img.shape[0]-50+int(np.mean(ys)),0:width]
except:
return img
def make_snippets(img, ys, rows = 50, pixels_per_row = 60, pixels_on_either_side = 15, file_path = "", column = "lit", add_to_end = 0):
start = 0
for y in range(rows):
finish = start + pixels_per_row
x_check = start - pixels_on_either_side
x_check2 = start + pixels_on_either_side
y_check = finish - pixels_on_either_side
y_check2 = finish + pixels_on_either_side
newlist = [x for x in ys if (x > x_check) & (x < x_check2)]
newlist2 = [x for x in ys if (x > y_check) & (x < y_check2)]
if len(newlist)!=0:
start = round(statistics.median(newlist))
if len(newlist2)!=0:
finish = round(statistics.median(newlist2))
if y==rows-1:
snippet=img[start:]
elif y!=rows-1:
snippet=img[start:finish]
start = finish
cv2.imwrite(file_path + "_" + column + "_row_" + str(y+1) + ".jpg", snippet)
# CODE THAT DOES THE SEGMENTATION
bad=[]
files = os.listdir(r'C:/Users/Chase/OneDrive/Documents/34/d32/')[:24]
#files = random.sample(os.listdir(), 4)
for d in tqdm(files):
if d[-4:] == ".jpg":
try:
out_dir = "C:/Users/Chase/OneDrive/Documents/service-project/{}".format(numdir + "/" + album)
im = cv2.imread(d)
outputs = predictor(im)
objects = outputs["instances"].pred_classes
boxes = outputs["instances"].pred_boxes
masks = outputs["instances"].pred_masks
boxes_np = boxes.tensor.cpu().numpy()
obj_np = objects.cpu().numpy()
masks_np = masks.cpu().numpy()
m = 0
for box in range(len(boxes_np)):
left = int(boxes_np[box][0])
top = int(boxes_np[box][1])
right = int(boxes_np[box][2])
bottom = int(boxes_np[box][3])
cropped_array = im[top:bottom,left:right]
mask = masks_np[m][top:bottom,left:right]
h , w = mask.shape
tl = int(np.argwhere(mask[200]==True)[0])
bl = int(np.argwhere(mask[h-200]==True)[0])
white1 = np.zeros([h,w,3],dtype=np.uint8)
white1.fill(255)
white2 = np.zeros([h,w,3],dtype=np.uint8)
white2.fill(255)
change = (tl-bl)/h
white3= (cropped_array * mask[..., None]) + (white1 * ~mask[..., None])
for i in range(h):
start = int(tl - i*change)
if len(np.argwhere(mask[i]==True))>0:
last = int(np.argwhere(mask[i]==True)[-1])
elif len(np.argwhere(mask[i]==True))==0:
last = w-start
white2[i][0:last-start] = white3[i][start:last]
if obj_np[m] == 0:
white3=white2[:,0:60]
outputs2 = predictor2(white3)
boxes2 = outputs2["instances"].pred_boxes
boxes_np2 = boxes2.tensor.cpu().numpy()
bottom2 = int(boxes_np2[0][3])
no_top=white3[bottom2:,:]
no_bot_or_top = crop_bot(no_top, width = 60, line_width_crop= 45)
no_bot_or_top = cv2.resize(no_bot_or_top,(60,3000))
ys = get_horizontal_lines(no_bot_or_top,width=60, line_width=45)
make_snippets(no_bot_or_top, ys, rows=50, pixels_per_row=60, pixels_on_either_side = 15, file_path = out_dir + "/" + d[:-4], column= 'lit1')
elif obj_np[m] == 1:
white3=white2[:,0:60]
outputs2 = predictor2(white3)
boxes2 = outputs2["instances"].pred_boxes
boxes_np2 = boxes2.tensor.cpu().numpy()
bottom2 = int(boxes_np2[0][3])
no_top=white3[bottom2:,:]
no_bot_or_top = crop_bot(no_top, width = 60, line_width_crop= 45)
no_bot_or_top = cv2.resize(no_bot_or_top,(60,3000))
ys = get_horizontal_lines(no_bot_or_top,width=60, line_width=45)
make_snippets(no_bot_or_top, ys, rows=50, pixels_per_row=60, pixels_on_either_side = 15, file_path = out_dir + "/" + d[:-4], column= 'lit2')
elif obj_np[m] == 2:
white3=white2[:,0:60]
outputs2 = predictor2(white3)
boxes2 = outputs2["instances"].pred_boxes
boxes_np2 = boxes2.tensor.cpu().numpy()
bottom2 = int(boxes_np2[0][3])
no_top=white3[bottom2:,:]
no_bot_or_top = crop_bot(no_top, width = 60, line_width_crop= 45)
no_bot_or_top = cv2.resize(no_bot_or_top,(60,3000))
ys = get_horizontal_lines(no_bot_or_top,width=60, line_width=45)
make_snippets(no_bot_or_top, ys, rows=50, pixels_per_row=60, pixels_on_either_side = 15, file_path = out_dir + "/" + d[:-4], column= 'lang1')
elif obj_np[m] == 3:
white3=white2[:,0:350]
outputs2 = predictor2(white3)
boxes2 = outputs2["instances"].pred_boxes
boxes_np2 = boxes2.tensor.cpu().numpy()
bottom2 = int(boxes_np2[0][3])
no_top=white3[bottom2:,:]
no_bot_or_top = crop_bot(no_top, line_width_crop=265)
no_bot_or_top = cv2.resize(no_bot_or_top,(350,3000))
ys = get_horizontal_lines(no_bot_or_top,width=350, line_width=265)
make_snippets(no_bot_or_top, ys, rows=50, pixels_per_row=60, pixels_on_either_side = 15, file_path = out_dir + "/" + d[:-4], column= 'lang2')
elif obj_np[m] == 4:
white3=white2[:,0:225]
outputs2 = predictor2(white3)
boxes2 = outputs2["instances"].pred_boxes
boxes_np2 = boxes2.tensor.cpu().numpy()
bottom2 = int(boxes_np2[0][3])
no_top=white3[bottom2:,:]
no_bot_or_top = crop_bot(no_top, line_width_crop=300)
no_bot_or_top = cv2.resize(no_bot_or_top,(225,3000))
ys = get_horizontal_lines(no_bot_or_top,width=225, line_width=150)
make_snippets(no_bot_or_top, ys, rows=50, pixels_per_row=60, pixels_on_either_side = 15, file_path = out_dir + "/" + d[:-4], column= 'rel')
m += 1
except KeyboardInterrupt:
exit(1)
except:
bad.append(d)
traceback.print_exc()
print("image failed: " + d)
pass
print("Percent Error: " + str(len(bad)/len(files)))
print(bad)
with open(f'C:/Users/Chase/OneDrive/Documents/service-project/{numdir}.csv', 'a') as output:
# /home/jmorri33/fsl_groups/fslg_census/compute/projects/Mexico_Census/error_img/mexico_error_62.csv
# ../../../../error_img
writer = csv.writer(output, delimiter=',')
writer.writerow(bad)