|
|
|
|
|
|
|
|
|
|
|
|
|
import numpy as np |
|
import math |
|
from matplotlib import pylab as plt |
|
import time |
|
import utility |
|
import debayer |
|
import sys |
|
from scipy import signal |
|
from scipy import interpolate |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class ImageInfo:
    """An image plus the camera-pipeline metadata that travels with it.

    Stores the pixel array together with bookkeeping the ISP stages read and
    update: color space, bayer pattern, channel gains, bit depth, black/white
    levels, color matrix, and cached min/max/dtype statistics.
    """

    def __init__(self, name = "unknown", data = -1, is_show = False):
        self.name = name
        self.data = data
        self.size = np.shape(self.data)
        self.is_show = is_show

        # Pipeline metadata; filled in later through the setters below.
        self.color_space = "unknown"
        self.bayer_pattern = "unknown"
        self.channel_gain = (1.0, 1.0, 1.0, 1.0)
        self.bit_depth = 0
        self.black_level = (0, 0, 0, 0)
        self.white_level = (1, 1, 1, 1)
        self.color_matrix = [[1., .0, .0],
                             [.0, 1., .0],
                             [.0, .0, 1.]]

        # Cached data statistics.
        self.min_value = np.min(self.data)
        self.max_value = np.max(self.data)
        self.data_type = self.data.dtype

        # Optional quick-look display at construction time.
        if self.is_show:
            plt.imshow(self.data)
            plt.show()

    def set_data(self, data):
        """Replace the pixel data and refresh every derived statistic."""
        self.data = data
        self.size = np.shape(self.data)
        self.data_type = self.data.dtype
        self.min_value = np.min(self.data)
        self.max_value = np.max(self.data)

    def get_size(self):
        """Return the shape tuple of the stored data."""
        return self.size

    def get_width(self):
        """Return the image width (number of columns)."""
        return self.size[1]

    def get_height(self):
        """Return the image height (number of rows)."""
        return self.size[0]

    def get_depth(self):
        """Return the channel count, or 0 for a single-channel (2D) image."""
        if np.ndim(self.data) > 2:
            return self.size[2]
        return 0

    def set_color_space(self, color_space):
        self.color_space = color_space

    def get_color_space(self):
        return self.color_space

    def set_channel_gain(self, channel_gain):
        self.channel_gain = channel_gain

    def get_channel_gain(self):
        return self.channel_gain

    def set_color_matrix(self, color_matrix):
        self.color_matrix = color_matrix

    def get_color_matrix(self):
        return self.color_matrix

    def set_bayer_pattern(self, bayer_pattern):
        self.bayer_pattern = bayer_pattern

    def get_bayer_pattern(self):
        return self.bayer_pattern

    def set_bit_depth(self, bit_depth):
        self.bit_depth = bit_depth

    def get_bit_depth(self):
        return self.bit_depth

    def set_black_level(self, black_level):
        self.black_level = black_level

    def get_black_level(self):
        return self.black_level

    def set_white_level(self, white_level):
        self.white_level = white_level

    def get_white_level(self):
        return self.white_level

    def get_min_value(self):
        return self.min_value

    def get_max_value(self):
        return self.max_value

    def get_data_type(self):
        return self.data_type

    def __str__(self):
        """Render a multi-line summary of the image and its metadata."""
        fields = [("name", self.name),
                  ("size", str(self.size)),
                  ("color space", self.color_space),
                  ("bayer pattern", self.bayer_pattern),
                  ("channel gains", str(self.channel_gain)),
                  ("bit depth", str(self.bit_depth)),
                  ("data type", str(self.data_type)),
                  ("black level", str(self.black_level)),
                  ("minimum value", str(self.min_value)),
                  ("maximum value", str(self.max_value))]
        body = "".join("\n\t" + label + ":\t" + value for label, value in fields)
        return "Image " + self.name + " info:" + body
|
|
|
|
|
|
|
|
|
|
|
|
|
def black_level_correction(raw, black_level, white_level, clip_range):
    """Subtract per-channel black levels and normalize to the clip range.

    :param raw: 2D Bayer-mosaic image.
    :param black_level: four black levels, one per 2x2 quadrant position.
    :param white_level: four white levels, one per 2x2 quadrant position.
    :param clip_range: [min, max] output range; data is rescaled to max.
    :return: float32 image, clipped to clip_range.
    """

    print("----------------------------------------------------")
    print("Running black level correction...")

    # Work in floating point regardless of the input types.
    black_level = np.float32(black_level)
    white_level = np.float32(white_level)
    raw = np.float32(raw)

    data = np.zeros(raw.shape)

    # The four phases of the 2x2 Bayer tile, normalized independently to
    # [0, 1] using that channel's black/white levels.
    quadrants = ((slice(None, None, 2), slice(None, None, 2)),
                 (slice(None, None, 2), slice(1, None, 2)),
                 (slice(1, None, 2), slice(None, None, 2)),
                 (slice(1, None, 2), slice(1, None, 2)))
    for idx, (rows, cols) in enumerate(quadrants):
        data[rows, cols] = (raw[rows, cols] - black_level[idx]) / (white_level[idx] - black_level[idx])

    # Rescale to the output range.
    data = data * clip_range[1]

    data = np.clip(data, clip_range[0], clip_range[1])
    return np.float32(data)
|
|
|
|
|
|
|
|
|
|
|
|
|
def channel_gain_white_balance(data, channel_gain):
    """Apply per-Bayer-channel gains for white balance.

    :param data: 2D Bayer-mosaic image.
    :param channel_gain: four gains, one per 2x2 quadrant position
        (top-left, top-right, bottom-left, bottom-right).
    :return: float32 gained image, clipped at 0 from below.
    """

    print("----------------------------------------------------")
    print("Running channel gain white balance...")

    data = np.float32(data)
    gains = np.float32(channel_gain)

    # Scale the four phases of the 2x2 Bayer tile independently.
    phases = ((0, 0), (0, 1), (1, 0), (1, 1))
    for gain, (row0, col0) in zip(gains, phases):
        data[row0::2, col0::2] *= gain

    # Gains must not produce negative pixel values.
    return np.clip(data, 0., None)
|
|
|
|
|
|
|
|
|
|
|
|
|
def bad_pixel_correction(data, neighborhood_size):
    """Clamp outlier (bad) pixels in a Bayer-mosaic image.

    Each pixel is compared against its same-color neighbors inside a
    neighborhood_size x neighborhood_size window (per Bayer quarter); values
    outside the neighborhood's [min, max] range are clamped to it.

    :param data: 2D Bayer-mosaic image.
    :param neighborhood_size: window size; must be odd (recommended 3).
    :return: float32 image of the same shape with outliers clamped, or the
        input unchanged if neighborhood_size is even.
    """

    print("----------------------------------------------------")
    print("Running bad pixel correction...")

    # The window needs a well-defined center pixel, hence an odd size.
    if ((neighborhood_size % 2) == 0):
        print("neighborhood_size shoud be odd number, recommended value 3")
        return data

    data = np.float32(data)

    # Split into the four Bayer sub-images so every pixel is only compared
    # with neighbors of the same color.
    D = {}
    D[0] = data[::2, ::2]
    D[1] = data[::2, 1::2]
    D[2] = data[1::2, ::2]
    D[3] = data[1::2, 1::2]

    # Border padding so the window also fits at the image edges.
    no_of_pixel_pad = math.floor(neighborhood_size / 2.)

    for idx in range(0, len(D)):

        print("bad pixel correction: Quarter " + str(idx+1) + " of 4")

        img = D[idx]
        width, height = utility.helpers(img).get_width_height()

        # Reflect-pad; np.pad returns a copy, so `img` is detached from
        # `data` from here on.
        img = np.pad(img, \
                     (no_of_pixel_pad, no_of_pixel_pad),\
                     'reflect')

        for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
            for j in range(no_of_pixel_pad, width + no_of_pixel_pad):

                mid_pixel_val = img[i, j]

                # View (not a copy) into the padded image.
                neighborhood = img[i - no_of_pixel_pad : i + no_of_pixel_pad+1,\
                                   j - no_of_pixel_pad : j + no_of_pixel_pad+1]

                # Exclude the center from the min/max statistics by
                # overwriting it with its left neighbor.  NOTE(review):
                # because `neighborhood` is a view this temporarily mutates
                # img[i, j]; the value is restored by the assignment below,
                # but already-corrected pixels do feed later windows, so the
                # filter is sequential rather than pure.
                neighborhood[no_of_pixel_pad, no_of_pixel_pad] = neighborhood[no_of_pixel_pad, no_of_pixel_pad-1]

                min_neighborhood = np.min(neighborhood)
                max_neighborhood = np.max(neighborhood)

                # Clamp the center pixel to the neighborhood range.
                if (mid_pixel_val < min_neighborhood):
                    img[i,j] = min_neighborhood
                elif (mid_pixel_val > max_neighborhood):
                    img[i,j] = max_neighborhood
                else:
                    img[i,j] = mid_pixel_val

        # Crop the padding and keep the corrected quarter.
        D[idx] = img[no_of_pixel_pad : height + no_of_pixel_pad,\
                     no_of_pixel_pad : width + no_of_pixel_pad]

    # Reassemble the full Bayer mosaic from the corrected quarters.
    data[::2, ::2] = D[0]
    data[::2, 1::2] = D[1]
    data[1::2, ::2] = D[2]
    data[1::2, 1::2] = D[3]

    return data
|
|
|
|
|
|
|
|
|
|
|
class demosaic:
    """Bayer-mosaic demosaicing algorithms and their post-processing steps."""

    def __init__(self, data, bayer_pattern="rggb", clip_range=[0, 65535], name="demosaic"):
        # data: 2D Bayer mosaic for the demosaicing methods, or the already
        # demosaiced 3-channel image for the post-process methods.
        self.data = np.float32(data)
        self.bayer_pattern = bayer_pattern
        self.clip_range = clip_range
        self.name = name

    def mhc(self, timeshow=False):
        """Demosaic with the Malvar-He-Cutler linear filters.

        :param timeshow: forwarded to debayer.debayer_mhc; presumably enables
            progress display — confirm against that function.
        :return: 3-channel RGB image.
        """

        print("----------------------------------------------------")
        print("Running demosaicing using Malvar-He-Cutler algorithm...")

        return debayer.debayer_mhc(self.data, self.bayer_pattern, self.clip_range, timeshow)

    def post_process_local_color_ratio(self, beta):
        """Suppress demosaicing artifacts by re-estimating interpolated
        samples from locally averaged color ratios.

        Operates in place on self.data (assumed to be the demosaiced RGB
        image); measured samples are untouched, interpolated samples are
        recomputed as (local ratio) * (measured channel) - beta.

        :param beta: positive offset added before taking ratios, to avoid
            division by values near zero.
        :return: the corrected image, clipped to self.clip_range.
        """

        print("----------------------------------------------------")
        print("Demosaicing post process using local color ratio...")

        data = self.data

        # Offset copy used for all ratio computations.
        data_beta = self.data + beta

        # Averaging kernels: zeta1 = 4-neighbor cross, zeta2 = diagonals.
        zeta1 = np.multiply([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]], .25)
        zeta2 = np.multiply([[1., 0., 1.], [0., 0., 0.], [1., 0., 1.]], .25)

        # Locally averaged color ratios.
        g_over_b = signal.convolve2d(np.divide(data_beta[:, :, 1], data_beta[:, :, 2]), zeta1, mode="same", boundary="symm")
        g_over_r = signal.convolve2d(np.divide(data_beta[:, :, 1], data_beta[:, :, 0]), zeta1, mode="same", boundary="symm")
        b_over_g_zeta2 = signal.convolve2d(np.divide(data_beta[:, :, 2], data_beta[:, :, 1]), zeta2, mode="same", boundary="symm")
        r_over_g_zeta2 = signal.convolve2d(np.divide(data_beta[:, :, 0], data_beta[:, :, 1]), zeta2, mode="same", boundary="symm")
        b_over_g_zeta1 = signal.convolve2d(np.divide(data_beta[:, :, 2], data_beta[:, :, 1]), zeta1, mode="same", boundary="symm")
        r_over_g_zeta1 = signal.convolve2d(np.divide(data_beta[:, :, 0], data_beta[:, :, 1]), zeta1, mode="same", boundary="symm")

        if self.bayer_pattern == "rggb":
            # G at B and R locations
            data[1::2, 1::2, 1] = -beta + np.multiply(data_beta[1::2, 1::2, 2], g_over_b[1::2, 1::2])
            data[::2, ::2, 1] = -beta + np.multiply(data_beta[::2, ::2, 0], g_over_r[::2, ::2])
            # B at R locations and R at B locations (diagonal kernel)
            data[::2, ::2, 2] = -beta + np.multiply(data_beta[::2, ::2, 1], b_over_g_zeta2[::2, ::2])
            data[1::2, 1::2, 0] = -beta + np.multiply(data_beta[1::2, 1::2, 1], r_over_g_zeta2[1::2, 1::2])
            # B and R at G locations (cross kernel)
            data[::2, 1::2, 2] = -beta + np.multiply(data_beta[::2, 1::2, 1], b_over_g_zeta1[::2, 1::2])
            data[1::2, ::2, 2] = -beta + np.multiply(data_beta[1::2, ::2, 1], b_over_g_zeta1[1::2, ::2])
            data[::2, 1::2, 0] = -beta + np.multiply(data_beta[::2, 1::2, 1], r_over_g_zeta1[::2, 1::2])
            data[1::2, ::2, 0] = -beta + np.multiply(data_beta[1::2, ::2, 1], r_over_g_zeta1[1::2, ::2])

        elif self.bayer_pattern == "grbg":
            # G at B and R locations
            # FIX: the g_over_b index was [1::2, 1::2], which samples the
            # ratio at the wrong (green) sites; blue sits at [1::2, ::2]
            # in grbg, matching how every other branch indexes its maps.
            data[1::2, ::2, 1] = -beta + np.multiply(data_beta[1::2, ::2, 2], g_over_b[1::2, ::2])
            data[::2, 1::2, 1] = -beta + np.multiply(data_beta[::2, 1::2, 0], g_over_r[::2, 1::2])
            # B at R locations and R at B locations (diagonal kernel)
            data[::2, 1::2, 2] = -beta + np.multiply(data_beta[::2, 1::2, 1], b_over_g_zeta2[::2, 1::2])
            data[1::2, ::2, 0] = -beta + np.multiply(data_beta[1::2, ::2, 1], r_over_g_zeta2[1::2, ::2])
            # B and R at G locations (cross kernel)
            data[::2, ::2, 2] = -beta + np.multiply(data_beta[::2, ::2, 1], b_over_g_zeta1[::2, ::2])
            data[1::2, 1::2, 2] = -beta + np.multiply(data_beta[1::2, 1::2, 1], b_over_g_zeta1[1::2, 1::2])
            data[::2, ::2, 0] = -beta + np.multiply(data_beta[::2, ::2, 1], r_over_g_zeta1[::2, ::2])
            data[1::2, 1::2, 0] = -beta + np.multiply(data_beta[1::2, 1::2, 1], r_over_g_zeta1[1::2, 1::2])

        elif self.bayer_pattern == "gbrg":
            # G at B and R locations
            data[::2, 1::2, 1] = -beta + np.multiply(data_beta[::2, 1::2, 2], g_over_b[::2, 1::2])
            data[1::2, ::2, 1] = -beta + np.multiply(data_beta[1::2, ::2, 0], g_over_r[1::2, ::2])
            # B at R locations and R at B locations (diagonal kernel)
            data[1::2, ::2, 2] = -beta + np.multiply(data_beta[1::2, ::2, 1], b_over_g_zeta2[1::2, ::2])
            data[::2, 1::2, 0] = -beta + np.multiply(data_beta[::2, 1::2, 1], r_over_g_zeta2[::2, 1::2])
            # B and R at G locations (cross kernel)
            data[::2, ::2, 2] = -beta + np.multiply(data_beta[::2, ::2, 1], b_over_g_zeta1[::2, ::2])
            data[1::2, 1::2, 2] = -beta + np.multiply(data_beta[1::2, 1::2, 1], b_over_g_zeta1[1::2, 1::2])
            data[::2, ::2, 0] = -beta + np.multiply(data_beta[::2, ::2, 1], r_over_g_zeta1[::2, ::2])
            data[1::2, 1::2, 0] = -beta + np.multiply(data_beta[1::2, 1::2, 1], r_over_g_zeta1[1::2, 1::2])

        elif self.bayer_pattern == "bggr":
            # G at B and R locations
            data[::2, ::2, 1] = -beta + np.multiply(data_beta[::2, ::2, 2], g_over_b[::2, ::2])
            data[1::2, 1::2, 1] = -beta + np.multiply(data_beta[1::2, 1::2, 0], g_over_r[1::2, 1::2])
            # B at R locations and R at B locations (diagonal kernel)
            data[1::2, 1::2, 2] = -beta + np.multiply(data_beta[1::2, 1::2, 1], b_over_g_zeta2[1::2, 1::2])
            data[::2, ::2, 0] = -beta + np.multiply(data_beta[::2, ::2, 1], r_over_g_zeta2[::2, ::2])
            # B and R at G locations (cross kernel)
            data[::2, 1::2, 2] = -beta + np.multiply(data_beta[::2, 1::2, 1], b_over_g_zeta1[::2, 1::2])
            data[1::2, ::2, 2] = -beta + np.multiply(data_beta[1::2, ::2, 1], b_over_g_zeta1[1::2, ::2])
            data[::2, 1::2, 0] = -beta + np.multiply(data_beta[::2, 1::2, 1], r_over_g_zeta1[::2, 1::2])
            data[1::2, ::2, 0] = -beta + np.multiply(data_beta[1::2, ::2, 1], r_over_g_zeta1[1::2, ::2])

        return np.clip(data, self.clip_range[0], self.clip_range[1])

    def directionally_weighted_gradient_based_interpolation(self):
        """Demosaic with directionally weighted gradient based interpolation.

        :return: 3-channel RGB image clipped to self.clip_range.
        """

        print("----------------------------------------------------")
        print("Running demosaicing using directionally weighted gradient based interpolation...")

        # Green is interpolated first, then blue/red relative to it.
        G = debayer.fill_channel_directional_weight(self.data, self.bayer_pattern)
        B, R = debayer.fill_br_locations(self.data, G, self.bayer_pattern)

        width, height = utility.helpers(self.data).get_width_height()
        output = np.empty((height, width, 3), dtype=np.float32)
        output[:, :, 0] = R
        output[:, :, 1] = G
        output[:, :, 2] = B

        return np.clip(output, self.clip_range[0], self.clip_range[1])

    def post_process_median_filter(self, edge_detect_kernel_size=3, edge_threshold=0, median_filter_kernel_size=3, clip_range=[0, 65535]):
        """Median-filter only at detected edge locations (zipper removal).

        :param edge_detect_kernel_size: sobel kernel size for edge detection.
        :param edge_threshold: threshold for classifying a pixel as edge.
        :param median_filter_kernel_size: window size of the median filter.
        :param clip_range: valid pixel range forwarded to edge detection.
        :return: (filtered image, edge-location map).
        """

        # Per-channel edge map for RGB input, single map for 2D input.
        edge_location = utility.edge_detection(self.data).sobel(edge_detect_kernel_size, "is_edge", edge_threshold, clip_range)

        output = np.empty(np.shape(self.data), dtype=np.float32)

        if (np.ndim(self.data) > 2):
            for i in range(0, np.shape(self.data)[2]):
                output[:, :, i] = utility.helpers(self.data[:, :, i]).edge_wise_median(median_filter_kernel_size, edge_location[:, :, i])
        elif (np.ndim(self.data) == 2):
            output = utility.helpers(self.data).edge_wise_median(median_filter_kernel_size, edge_location)

        return output, edge_location

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
class lens_shading_correction:
    """Compensates the radial brightness falloff introduced by the lens."""

    def __init__(self, data, name="lens_shading_correction"):
        # data: 2D Bayer-mosaic or single-channel image.
        self.data = np.float32(data)
        self.name = name

    def flat_field_compensation(self, dark_current_image, flat_field_image):
        """Correct shading from measured dark-current and flat-field frames.

        The shading profile is (flat field - dark current); the image is
        divided by it and rescaled by the profile's mean so the overall
        brightness is preserved.
        """

        print("----------------------------------------------------")
        print("Running lens shading correction with flat field compensation...")

        dark = np.float32(dark_current_image)
        flat = np.float32(flat_field_image)

        # Shading profile of the optical path alone.
        profile = flat - dark
        return np.average(profile) * np.divide(self.data - dark, profile)

    def approximate_mathematical_compensation(self, params, clip_min=0, clip_max=65535):
        """Correct shading with a parabolic radial gain model.

        gain = params[0] * (distance - params[1])**2 + params[2], where
        distance is the pixel's distance from the image center normalized by
        the center-to-corner distance.
        """

        print("----------------------------------------------------")
        print("Running lens shading correction with approximate mathematical compensation...")
        width, height = utility.helpers(self.data).get_width_height()

        center = [height / 2, width / 2]
        # Largest possible center distance, used to normalize to [0, 1].
        max_distance = utility.distance_euclid(center, [height, width])

        corrected = np.empty((height, width), dtype=np.float32)
        for row in range(height):
            for col in range(width):
                radius = utility.distance_euclid(center, [row, col]) / max_distance
                gain = params[0] * (radius - params[1])**2 + params[2]
                corrected[row, col] = self.data[row, col] * gain

        return np.clip(corrected, clip_min, clip_max)

    def __str__(self):
        return "lens shading correction. There are two methods: " + \
                "\n (1) flat_field_compensation: requires dark_current_image and flat_field_image" + \
                "\n (2) approximate_mathematical_compensation:"
|
|
|
|
|
|
|
|
|
|
|
|
|
class bayer_denoising:
    """Denoising that operates directly on the Bayer mosaic."""

    def __init__(self, data, name="bayer_denoising"):
        # data: 2D Bayer-mosaic image.
        self.data = np.float32(data)
        self.name = name

    def utilize_hvs_behavior(self, bayer_pattern, initial_noise_level, hvs_min, hvs_max, threshold_red_blue, clip_range):
        """Denoise the Bayer mosaic using a human-visual-system (HVS) model.

        For every pixel, a same-color neighborhood is inspected; a texture
        degree in [0, 1] (1 = flat, 0 = strongly textured) is derived from
        the maximum absolute neighbor difference and an HVS-dependent
        threshold, and the pixel is replaced by a weighted average of its
        neighbors, where weights depend on similarity to the center.

        :param bayer_pattern: one of "rggb", "grbg", "gbrg", "bggr"; data is
            shuffled to rggb internally and shuffled back on output.
        :param initial_noise_level: starting noise estimate for all channels.
        :param hvs_min, hvs_max: bounds of the HVS sensitivity ramp.
        :param threshold_red_blue: flat-region threshold for R/B pixels.
        :param clip_range: [min, max] valid pixel range.
        :return: (denoised image clipped to clip_range,
                  per-pixel texture-degree debug map).
        """

        print("----------------------------------------------------")
        print("Running bayer denoising utilizing hvs behavior...")

        raw = self.data
        raw = np.clip(raw, clip_range[0], clip_range[1])
        width, height = utility.helpers(raw).get_width_height()

        # Normalize the pattern to rggb so the parity tests below are valid.
        # NOTE(review): this re-reads self.data, so the clip above is
        # discarded for non-rggb input — confirm whether that is intended.
        if (bayer_pattern != "rggb"):
            raw = utility.helpers(self.data).shuffle_bayer_pattern(bayer_pattern, "rggb")

        # 5x5 window: same-color neighbors are two pixels apart in a mosaic.
        neighborhood_size = 5

        no_of_pixel_pad = math.floor(neighborhood_size / 2)

        raw = np.pad(raw, \
                     (no_of_pixel_pad, no_of_pixel_pad),\
                     'reflect')

        denoised_out = np.empty((height, width), dtype=np.float32)

        # Records the texture degree chosen at every pixel (for debugging).
        texture_degree_debug = np.empty((height, width), dtype=np.float32)
        for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
            for j in range(no_of_pixel_pad, width + no_of_pixel_pad):

                center_pixel = raw[i, j]

                # HVS weight: sensitivity ramps linearly from hvs_max at the
                # range extremes toward hvs_min at mid-gray.
                half_max = clip_range[1] / 2
                if (center_pixel <= half_max):
                    hvs_weight = -(((hvs_max - hvs_min) * center_pixel) / half_max) + hvs_max
                else:
                    hvs_weight = (((center_pixel - clip_range[1]) * (hvs_max - hvs_min))/(clip_range[1] - half_max)) + hvs_max

                # Noise estimates propagate along the row; reset at the
                # first two columns of every row.
                if (j < no_of_pixel_pad+2):
                    noise_level_previous_red = initial_noise_level
                    noise_level_previous_blue = initial_noise_level
                    noise_level_previous_green = initial_noise_level
                else:
                    noise_level_previous_green = noise_level_current_green
                    # In rggb, even rows carry red and odd rows carry blue.
                    if ((i % 2) == 0):
                        noise_level_previous_red = noise_level_current_red
                    elif ((i % 2) != 0):
                        noise_level_previous_blue = noise_level_current_blue

                # Red pixel (even row, even column in rggb).
                if (((i % 2) == 0) and ((j % 2) == 0)):

                    # Eight same-color neighbors, two pixels apart.
                    neighborhood = [raw[i-2, j-2], raw[i-2, j], raw[i-2, j+2],\
                                    raw[i, j-2], raw[i, j+2],\
                                    raw[i+2, j-2], raw[i+2, j], raw[i+2, j+2]]

                    # Absolute differences against the center pixel.
                    d = np.abs(neighborhood - center_pixel)

                    d_max = np.max(d)
                    d_min = np.min(d)

                    texture_threshold = hvs_weight + noise_level_previous_red

                    # Texture degree: 1 below the flat threshold, 0 above the
                    # texture threshold, linear ramp in between.
                    if (d_max <= threshold_red_blue):
                        texture_degree = 1.
                    elif ((d_max > threshold_red_blue) and (d_max <= texture_threshold)):
                        texture_degree = -((d_max - threshold_red_blue) / (texture_threshold - threshold_red_blue)) + 1.
                    elif (d_max > texture_threshold):
                        texture_degree = 0.

                    # Update the running red-channel noise estimate.
                    noise_level_current_red = texture_degree * d_max + (1 - texture_degree) * noise_level_previous_red

                # Blue pixel (odd row, odd column in rggb).
                elif (((i % 2) != 0) and ((j % 2) != 0)):

                    neighborhood = [raw[i-2, j-2], raw[i-2, j], raw[i-2, j+2],\
                                    raw[i, j-2], raw[i, j+2],\
                                    raw[i+2, j-2], raw[i+2, j], raw[i+2, j+2]]

                    d = np.abs(neighborhood - center_pixel)

                    d_max = np.max(d)
                    d_min = np.min(d)

                    texture_threshold = hvs_weight + noise_level_previous_blue

                    if (d_max <= threshold_red_blue):
                        texture_degree = 1.
                    elif ((d_max > threshold_red_blue) and (d_max <= texture_threshold)):
                        texture_degree = -((d_max - threshold_red_blue) / (texture_threshold - threshold_red_blue)) + 1.
                    elif (d_max > texture_threshold):
                        texture_degree = 0.

                    # Update the running blue-channel noise estimate.
                    noise_level_current_blue = texture_degree * d_max + (1 - texture_degree) * noise_level_previous_blue

                # Green pixel (mixed parity).
                elif ((((i % 2) == 0) and ((j % 2) != 0)) or (((i % 2) != 0) and ((j % 2) == 0))):

                    # Twelve green neighbors: diagonal (1-apart) plus the
                    # 2-apart cross/diagonal positions.
                    neighborhood = [raw[i-2, j-2], raw[i-2, j], raw[i-2, j+2],\
                                    raw[i-1, j-1], raw[i-1, j+1],\
                                    raw[i, j-2], raw[i, j+2],\
                                    raw[i+1, j-1], raw[i+1, j+1],\
                                    raw[i+2, j-2], raw[i+2, j], raw[i+2, j+2]]

                    d = np.abs(neighborhood - center_pixel)

                    d_max = np.max(d)
                    d_min = np.min(d)

                    texture_threshold = hvs_weight + noise_level_previous_green

                    # Green uses 0 as the flat threshold instead of
                    # threshold_red_blue.
                    if (d_max == 0):
                        texture_degree = 1
                    elif ((d_max > 0) and (d_max <= texture_threshold)):
                        texture_degree = -(d_max / texture_threshold) + 1.
                    elif (d_max > texture_threshold):
                        texture_degree = 0

                    # Update the running green-channel noise estimate.
                    noise_level_current_green = texture_degree * d_max + (1 - texture_degree) * noise_level_previous_green

                # Similarity thresholds for the averaging weights below.
                if (texture_degree == 1):
                    threshold_low = threshold_high = d_max
                elif (texture_degree == 0):
                    threshold_low = d_min
                    threshold_high = (d_max + d_min) / 2
                elif ((texture_degree > 0) and (texture_degree < 1)):
                    threshold_high = (d_max + ((d_max + d_min) / 2)) / 2
                    threshold_low = (d_min + threshold_high) / 2

                # Weighted average: similar neighbors contribute with weight
                # 1, dissimilar ones fall back to the center value.
                weight = np.empty(np.size(d), dtype=np.float32)
                pf = 0.
                for w_i in range(0, np.size(d)):
                    if (d[w_i] <= threshold_low):
                        weight[w_i] = 1.
                    elif (d[w_i] > threshold_high):
                        weight[w_i] = 0.
                    elif ((d[w_i] > threshold_low) and (d[w_i] < threshold_high)):
                        weight[w_i] = 1. + ((d[w_i] - threshold_low) / (threshold_low - threshold_high))

                    pf += weight[w_i] * neighborhood[w_i] + (1. - weight[w_i]) * center_pixel

                denoised_out[i - no_of_pixel_pad, j-no_of_pixel_pad] = pf / np.size(d)

                texture_degree_debug[i - no_of_pixel_pad, j-no_of_pixel_pad] = texture_degree

        # Restore the caller's original Bayer pattern.
        if (bayer_pattern != "rggb"):
            denoised_out = utility.shuffle_bayer_pattern(denoised_out, "rggb", bayer_pattern)

        return np.clip(denoised_out, clip_range[0], clip_range[1]), texture_degree_debug

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
class color_correction:
    """Convert camera-native RGB to a standard color space via the sensor's
    color matrix (xyz2cam)."""

    def __init__(self, data, color_matrix, color_space="srgb", illuminant="d65", name="color correction", clip_range=[0, 65535]):
        # data: demosaiced 3-channel image in camera-native RGB.
        # color_matrix: the sensor's 3x3 XYZ -> camera matrix.
        self.data = np.float32(data)
        self.xyz2cam = np.float32(color_matrix)
        self.color_space = color_space
        self.illuminant = illuminant
        self.name = name
        self.clip_range = clip_range

    def get_rgb2xyz(self):
        """Return the 3x3 RGB -> XYZ matrix for the configured color space
        and illuminant, or None (with a message) if unsupported."""

        if (self.color_space == "srgb"):
            if (self.illuminant == "d65"):
                return [[.4124564,  .3575761,  .1804375],\
                        [.2126729,  .7151522,  .0721750],\
                        [.0193339,  .1191920,  .9503041]]
            elif (self.illuminant == "d50"):
                return [[.4360747,  .3850649,  .1430804],\
                        [.2225045,  .7168786,  .0606169],\
                        [.0139322,  .0971045,  .7141733]]
            else:
                # FIX: message previously said "color_space must be d65 or
                # d50", but this branch is about the illuminant (cf. the
                # adobe-rgb-1998 branch below).
                print("for now, illuminant must be d65 or d50")
                return

        elif (self.color_space == "adobe-rgb-1998"):
            if (self.illuminant == "d65"):
                return [[.5767309,  .1855540,  .1881852],\
                        [.2973769,  .6273491,  .0752741],\
                        [.0270343,  .0706872,  .9911085]]
            elif (self.illuminant == "d50"):
                return [[.6097559,  .2052401,  .1492240],\
                        [.3111242,  .6256560,  .0632197],\
                        [.0194811,  .0608902,  .7448387]]
            else:
                print("for now, illuminant must be d65 or d50")
                return
        else:
            print("for now, color_space must be srgb or adobe-rgb-1998")
            return

    def calculate_cam2rgb(self):
        """Compute the 3x3 camera -> RGB correction matrix.

        The combined RGB -> camera matrix is row-normalized (so white is
        preserved) and then inverted; falls back to identity if the matrix
        is numerically singular.
        """

        rgb2cam = np.dot(self.xyz2cam, self.get_rgb2xyz())

        # Normalize each row to sum to 1 so that (1, 1, 1) maps to itself.
        rgb2cam = np.divide(rgb2cam, np.reshape(np.sum(rgb2cam, 1), [3, 1]))

        # Invert only if well-conditioned; otherwise warn and use identity.
        if (np.linalg.cond(rgb2cam) < (1 / sys.float_info.epsilon)):
            return np.linalg.inv(rgb2cam)
        else:
            print("Warning! matrix not invertible.")
            return np.identity(3, dtype=np.float32)

    def apply_cmatrix(self):
        """Apply the camera -> RGB matrix to the stored image.

        :return: color-corrected image clipped to self.clip_range, or None
            if the stored data is not 3-dimensional.
        """

        print("----------------------------------------------------")
        print("running color correction...")

        if (np.ndim(self.data) != 3):
            print("data need to be three dimensional")
            return

        cam2rgb = self.calculate_cam2rgb()

        width, height = utility.helpers(self.data).get_width_height()

        R = self.data[:, :, 0]
        G = self.data[:, :, 1]
        B = self.data[:, :, 2]

        # Per-pixel 3x3 matrix multiply, written out channel by channel.
        color_corrected = np.empty((height, width, 3), dtype=np.float32)
        color_corrected[:, :, 0] = R * cam2rgb[0, 0] + G * cam2rgb[0, 1] + B * cam2rgb[0, 2]
        color_corrected[:, :, 1] = R * cam2rgb[1, 0] + G * cam2rgb[1, 1] + B * cam2rgb[1, 2]
        color_corrected[:, :, 2] = R * cam2rgb[2, 0] + G * cam2rgb[2, 1] + B * cam2rgb[2, 2]

        return np.clip(color_corrected, self.clip_range[0], self.clip_range[1])

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
class nonlinearity:
    """Pointwise nonlinear transfer functions (gamma and related curves)."""

    def __init__(self, data, name="nonlinearity"):
        self.data = np.float32(data)
        self.name = name

    def luma_adjustment(self, multiplier, clip_range=[0, 65535]):
        """Brighten the image by a factor of log10(multiplier)."""

        print("----------------------------------------------------")
        print("Running brightening...")

        scale = np.log10(multiplier)
        return np.clip(scale * self.data, clip_range[0], clip_range[1])

    def by_value(self, value, clip_range):
        """Apply a power-law (gamma) curve with exponent `value`."""

        print("----------------------------------------------------")
        print("Running nonlinearity by value...")

        # Normalize to [0, 1], raise to the power, and rescale.
        normalized = np.clip(self.data, clip_range[0], clip_range[1]) / clip_range[1]
        curved = clip_range[1] * (normalized**value)
        return np.clip(curved, clip_range[0], clip_range[1])

    def by_table(self, table, nonlinearity_type="gamma", clip_range=[0, 65535]):
        """Apply (or invert) a gamma curve loaded from a text file.

        The table is rescaled so its maximum maps to clip_range[1], and the
        curve is applied by linear interpolation.
        """

        print("----------------------------------------------------")
        print("Running nonlinearity by table...")

        gamma_table = np.loadtxt(table)
        gamma_table = clip_range[1] * gamma_table / np.max(gamma_table)
        linear_table = np.linspace(clip_range[0], clip_range[1], np.size(gamma_table))

        if (nonlinearity_type == "gamma"):
            # Linear data -> gamma-coded data.
            lookup = np.interp(self.data, linear_table, gamma_table)
            return np.clip(lookup, clip_range[0], clip_range[1])
        elif (nonlinearity_type == "degamma"):
            # Gamma-coded data -> linear data.
            lookup = np.interp(self.data, gamma_table, linear_table)
            return np.clip(lookup, clip_range[0], clip_range[1])

    def by_equation(self, a, b, clip_range):
        """Apply the parametric curve a*e^(b*x) + x + a*x - a*e^b*x - a on
        range-normalized data."""

        print("----------------------------------------------------")
        print("Running nonlinearity by equation...")

        # Normalize to [0, 1] first.
        normalized = np.clip(self.data, clip_range[0], clip_range[1]) / clip_range[1]
        curved = a * np.exp(b * normalized) + normalized + a * normalized - a * np.exp(b) * normalized - a
        return np.clip(clip_range[1] * curved, clip_range[0], clip_range[1])

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
class tone_mapping:
    """Global and local tone mapping operators."""

    def __init__(self, data, name="tone mapping"):
        self.data = np.float32(data)
        self.name = name

    def nonlinear_masking(self, strength_multiplier=1.0, gaussian_kernel_size=[5, 5], gaussian_sigma=1.0, clip_range=[0, 65535]):
        """Tone map by raising each pixel to a spatially varying exponent.

        A blurred luminance mask drives a per-pixel exponent 0.5**mask, so
        brighter surroundings reduce the exponent more strongly;
        strength_multiplier scales the effect.
        """

        print("----------------------------------------------------")
        print("Running tone mapping by non linear masking...")

        # Luminance image the mask is derived from.
        if np.ndim(self.data) == 3:
            luminance = utility.color_conversion(self.data).rgb2gray()
        else:
            luminance = self.data

        blur_kernel = utility.create_filter().gaussian(gaussian_kernel_size, gaussian_sigma)

        # Blurred, range-normalized luminance mask.
        mask = signal.convolve2d(luminance, blur_kernel, mode="same", boundary="symm")
        mask = strength_multiplier * mask / clip_range[1]

        # Per-pixel exponent, replicated across channels for color input.
        exponent = np.power(0.5, mask)
        if np.ndim(self.data) == 3:
            width, height = utility.helpers(self.data).get_width_height()
            alpha = np.empty((height, width, 3), dtype=np.float32)
            for channel in range(3):
                alpha[:, :, channel] = exponent
        else:
            alpha = exponent

        return np.clip(clip_range[1] * np.power(self.data/clip_range[1], alpha), clip_range[0], clip_range[1])

    def dynamic_range_compression(self, drc_type="normal", drc_bound=[-40., 260.], clip_range=[0, 65535]):
        """Compress dynamic range in luma while preserving local detail.

        The luma is split into a bilateral-filtered base layer and a detail
        (ratio) layer; only the base layer's contrast is reduced, using the
        bounds in drc_bound (expressed on a 0-255 scale).
        """

        ycc = utility.color_conversion(self.data).rgb2ycc("bt601")
        luma = ycc[:, :, 0]
        cb = ycc[:, :, 1]
        cr = ycc[:, :, 2]

        # Edge signal steering the bilateral filter.
        if (drc_type == "normal"):
            edge = luma
        elif (drc_type == "joint"):
            edge = utility.edge_detection(luma).sobel(3, "gradient_magnitude")

        # Base/detail decomposition of the luma channel.
        base = utility.special_function(luma).bilateral_filter(edge)
        detail = np.divide(ycc[:, :, 0], base)

        # Contrast-reduction factor from the bounds, rescaled to clip_range.
        C = drc_bound[0] * clip_range[1] / 255.
        temp = drc_bound[1] * clip_range[1] / 255.
        F = (temp * (C + clip_range[1])) / (clip_range[1] * (temp - C))
        base_compressed = F * (base - (clip_range[1] / 2.)) + (clip_range[1] / 2.)

        # Recombine base and detail, keep chroma unchanged.
        ycc_out = ycc
        ycc_out[:, :, 0] = np.multiply(base_compressed, detail)
        rgb_out = utility.color_conversion(ycc_out).ycc2rgb("bt601")

        return np.clip(rgb_out, clip_range[0], clip_range[1])
|
|
|
|
|
|
|
|
|
|
|
|
|
class sharpening:
    """Image sharpening operators."""

    def __init__(self, data, name="sharpening"):
        self.data = np.float32(data)
        self.name = name

    def unsharp_masking(self, gaussian_kernel_size=[5, 5], gaussian_sigma=2.0,\
                        slope=1.5, tau_threshold=0.05, gamma_speed=4., clip_range=[0, 65535]):
        """Sharpen by adding back a soft-cored high-pass of the image.

        :param gaussian_kernel_size, gaussian_sigma: low-pass filter used to
            form the high-pass (original - blurred) signal.
        :param slope: gain applied to the high-pass signal.
        :param tau_threshold: coring threshold as a fraction of clip_range[1];
            differences below it are suppressed to avoid amplifying noise.
        :param gamma_speed: transition speed of the soft-coring curve
            (forwarded to utility.special_function.soft_coring).
        :return: sharpened image clipped to clip_range.
        """

        print("----------------------------------------------------")
        print("Running sharpening by unsharp masking...")

        gaussian_kernel = utility.create_filter().gaussian(gaussian_kernel_size, gaussian_sigma)

        # Low-pass the image, per channel for color input.
        # FIX: condition was `np.ndim(self.data > 2)` — the ndim of a boolean
        # array, truthy for any non-scalar input — so 2D images always took
        # this branch and crashed on the shape[2] lookup.
        if np.ndim(self.data) > 2:
            image_blur = np.empty(np.shape(self.data), dtype=np.float32)
            for i in range(0, np.shape(self.data)[2]):
                image_blur[:, :, i] = signal.convolve2d(self.data[:, :, i], gaussian_kernel, mode="same", boundary="symm")
        else:
            # FIX: was `signal.convolove2d` (typo), which raised
            # AttributeError whenever this branch ran.
            image_blur = signal.convolve2d(self.data, gaussian_kernel, mode="same", boundary="symm")

        # High-pass component to be cored and added back.
        image_high_pass = self.data - image_blur

        # Convert the threshold from a fraction to absolute pixel units.
        tau_threshold = tau_threshold * clip_range[1]

        return np.clip(self.data + utility.special_function(\
                       image_high_pass).soft_coring(\
                       slope, tau_threshold, gamma_speed), clip_range[0], clip_range[1])

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
class noise_reduction:
    """Spatial noise reduction filters."""

    def __init__(self, data, clip_range=[0, 65535], name="noise reduction"):
        self.data = np.float32(data)
        self.clip_range = clip_range
        self.name = name

    def sigma_filter(self, neighborhood_size=7, sigma=[6, 6, 6]):
        """Run a sigma filter over the image.

        :param neighborhood_size: window size of the filter.
        :param sigma: per-channel sigma values for 3-channel input; passed
            through unchanged for 2D input.
        :return: filtered image clipped to self.clip_range.
        """

        print("----------------------------------------------------")
        print("Running noise reduction by sigma filter...")

        # FIX: condition was `np.ndim(self.data > 2)` — the ndim of a boolean
        # array, truthy for any non-scalar input — which made the grayscale
        # branch unreachable and crashed 2D input on the shape[2] lookup.
        if np.ndim(self.data) > 2:
            output = np.empty(np.shape(self.data), dtype=np.float32)
            for i in range(0, np.shape(self.data)[2]):
                output[:, :, i] = utility.helpers(self.data[:, :, i]).sigma_filter_helper(neighborhood_size, sigma[i])
            return np.clip(output, self.clip_range[0], self.clip_range[1])
        else:
            # NOTE(review): for 2D input `sigma` is forwarded as-is; callers
            # presumably supply a scalar here — confirm against
            # utility.helpers.sigma_filter_helper.
            return np.clip(utility.helpers(self.data).sigma_filter_helper(neighborhood_size, sigma), self.clip_range[0], self.clip_range[1])

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
class distortion_correction:
    """Geometric lens distortion correction."""

    def __init__(self, data, name="distortion correction"):
        self.data = np.float32(data)
        self.name = name

    def empirical_correction(self, correction_type="pincushion-1", strength=0.1, zoom_type="crop", clip_range=[0, 65535]):
        """Correct barrel/pincushion distortion with an empirical radial model.

        :param correction_type: "barrel-1", "barrel-2", "pincushion-1" or
            "pincushion-2" (forwarded to
            utility.special_function.distortion_function).
        :param strength: distortion strength; must be >= 0.
        :param zoom_type: "fit" or "crop" — how the corrected image is scaled
            into the output frame.
        :param clip_range: output clipping range.
        :return: corrected image clipped to clip_range, or the unmodified
            data when strength is negative.
        """

        if (strength < 0):
            print("Warning! strength should be equal of greater than 0.")
            return self.data

        print("----------------------------------------------------")
        print("Running distortion correction by empirical method...")

        width, height = utility.helpers(self.data).get_width_height()
        half_width = width / 2
        half_height = height / 2

        # Pixel coordinates relative to the image center.
        xi, yi = np.meshgrid(np.linspace(-half_width, half_width, width),\
                             np.linspace(-half_height, half_height, height))

        # Polar coordinates of every output pixel.
        r = np.sqrt(xi**2 + yi**2)
        theta = np.arctan2(yi, xi)

        # Normalize the radius by the image diagonal.
        R = math.sqrt(width**2 + height**2)
        r = r / R

        # Distorted (source) radius for every output pixel.
        s = utility.special_function(r).distortion_function(correction_type, strength)

        # Scale so the result either fits entirely ("fit") or fills the
        # frame ("crop").
        # NOTE(review): an unrecognized correction_type or zoom_type leaves
        # scaling_parameter unbound and raises NameError below — confirm
        # whether an explicit guard is wanted.
        if ((correction_type=="barrel-1") or (correction_type=="barrel-2")):
            if (zoom_type == "fit"):
                scaling_parameter = r[0, 0] / s[0, 0]
            elif (zoom_type == "crop"):
                scaling_parameter = 1. / (1. + strength * (np.min([half_width, half_height])/R)**2)
        elif ((correction_type=="pincushion-1") or (correction_type=="pincushion-2")):
            if (zoom_type == "fit"):
                scaling_parameter = 1. / (1. + strength * (np.min([half_width, half_height])/R)**2)
            elif (zoom_type == "crop"):
                scaling_parameter = r[0, 0] / s[0, 0]

        # Back to pixel units.
        s = s * scaling_parameter * R

        # Cartesian sampling coordinates.
        xt = np.multiply(s, np.cos(theta))
        yt = np.multiply(s, np.sin(theta))

        # Resample per channel.
        # FIX: conditions were `np.ndim(self.data == 3)` and
        # `np.ndim(self.data == 2)` — the ndim of a boolean array, truthy for
        # any non-scalar input — so the 2D branch was unreachable and
        # grayscale input crashed on the channel indexing.
        if np.ndim(self.data) == 3:

            output = np.empty(np.shape(self.data), dtype=np.float32)

            output[:, :, 0] = utility.helpers(self.data[:, :, 0]).bilinear_interpolation(xt + half_width, yt + half_height)
            output[:, :, 1] = utility.helpers(self.data[:, :, 1]).bilinear_interpolation(xt + half_width, yt + half_height)
            output[:, :, 2] = utility.helpers(self.data[:, :, 2]).bilinear_interpolation(xt + half_width, yt + half_height)

        elif np.ndim(self.data) == 2:

            output = utility.helpers(self.data).bilinear_interpolation(xt + half_width, yt + half_height)

        return np.clip(output, clip_range[0], clip_range[1])

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
class memory_color_enhancement:
    """Selective hue adjustment for memory colors (skin, sky, foliage, ...)."""

    def __init__(self, data, name="memory color enhancement"):
        self.data = np.float32(data)
        self.name = name

    def by_hue_squeeze(self, target_hue, hue_preference, hue_sigma, is_both_side, multiplier, chroma_preference, chroma_sigma, color_space="srgb", illuminant="d65", clip_range=[0, 65535], cie_version="1931"):
        """Squeeze hues near each target hue toward its preferred hue.

        For every entry k, pixels whose hue is close to target_hue[k] (and
        whose chroma is close to chroma_preference[k]) are pulled toward
        hue_preference[k], with Gaussian falloff controlled by hue_sigma[k]
        and chroma_sigma[k] and overall strength multiplier[k].
        """

        # Work in LCh, where hue is an explicit channel.
        lch = utility.color_conversion(self.data).rgb2xyz(color_space, clip_range)
        lch = utility.color_conversion(lch).xyz2lab(cie_version, illuminant)
        lch = utility.color_conversion(lch).lab2lch()

        width, height = utility.helpers(self.data).get_width_height()
        hue_correction = np.zeros((height, width), dtype=np.float32)

        for k in range(0, np.size(target_hue)):

            # How far each pixel's hue is from the preferred hue.
            delta_hue = lch[:, :, 2] - hue_preference[k]

            # Gaussian weight around the target hue (optionally symmetric
            # around +/- the target).
            gauss = np.exp( -np.power(lch[:, :, 2] - target_hue[k], 2) / (2 * hue_sigma[k]**2))
            if is_both_side[k]:
                gauss = gauss + np.exp( -np.power(lch[:, :, 2] + target_hue[k], 2) / (2 * hue_sigma[k]**2))

            weight_hue = multiplier[k] * gauss / np.max(gauss)

            # Gaussian weight around the preferred chroma.
            weight_chroma = np.exp( -np.power(lch[:, :, 1] - chroma_preference[k], 2) / (2 * chroma_sigma[k]**2))

            # Accumulate this entry's contribution.
            hue_correction = hue_correction + np.multiply(np.multiply(delta_hue, weight_hue), weight_chroma)

        # Shift the hue channel by the accumulated correction.
        lch[:, :, 2] = lch[:, :, 2] - hue_correction

        # Convert back to RGB.
        lch = utility.color_conversion(lch).lch2lab()
        lch = utility.color_conversion(lch).lab2xyz(cie_version, illuminant)
        lch = utility.color_conversion(lch).xyz2rgb(color_space, clip_range)

        return lch

    def __str__(self):
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class chromatic_aberration_correction:
    """Chromatic aberration artifact removal."""

    def __init__(self, data, name="chromatic aberration correction"):
        self.data = np.float32(data)
        self.name = name

    def purple_fringe_removal(self, nsr_threshold, cr_threshold, clip_range=[0, 65535]):
        """Desaturate purple-fringe pixels near strong edges.

        A pixel is treated as fringe when it is (a) in a near-saturated
        region (brightness above nsr_threshold, given in percent of the
        range), (b) purple-ish (r - b < cr_threshold and b - g >
        cr_threshold), and (c) near a strong gradient in the coarsely
        quantized channels.  Fringe pixels are replaced by their gray
        (channel-mean) value.

        :return: float32 RGB image with fringe pixels neutralized; self.data
            is left unmodified.
        """

        width, height = utility.helpers(self.data).get_width_height()

        r = self.data[:, :, 0]
        g = self.data[:, :, 1]
        b = self.data[:, :, 2]

        # (a) near-saturated-region mask.
        nsr_threshold = clip_range[1] * nsr_threshold / 100
        temp = (r + g + b) / 3
        temp = np.asarray(temp)
        mask = temp > nsr_threshold
        nsr = np.zeros((height, width)).astype(int)
        nsr[mask] = 1

        # (b) purple-color mask.
        temp = r - b
        temp1 = b - g
        temp = np.asarray(temp)
        temp1 = np.asarray(temp1)
        mask = (temp < cr_threshold) & (temp1 > cr_threshold)
        cr = np.zeros((height, width)).astype(int)
        cr[mask] = 1

        # (c) strong-gradient mask from coarsely quantized channels.
        qr = utility.helpers(r).nonuniform_quantization()
        qg = utility.helpers(g).nonuniform_quantization()
        qb = utility.helpers(b).nonuniform_quantization()

        g_qr = utility.edge_detection(qr).sobel(5, "gradient_magnitude")
        g_qg = utility.edge_detection(qg).sobel(5, "gradient_magnitude")
        g_qb = utility.edge_detection(qb).sobel(5, "gradient_magnitude")

        g_qr = np.asarray(g_qr)
        g_qg = np.asarray(g_qg)
        g_qb = np.asarray(g_qb)

        bgm = np.zeros((height, width), dtype=np.float32)
        mask = (g_qr != 0) | (g_qg != 0) | (g_qb != 0)
        bgm[mask] = 1

        # Fringe = all three conditions at once.
        # (A dead `fring_map = np.asarray(fringe_map)` typo line was removed.)
        fringe_map = np.multiply(np.multiply(nsr, cr), bgm)
        mask = (fringe_map == 1)

        # FIX: r1/g1/b1 were plain aliases of self.data's channel views, so
        # the masked assignment below silently mutated self.data in place;
        # work on copies instead.
        r1 = np.copy(r)
        g1 = np.copy(g)
        b1 = np.copy(b)
        r1[mask] = g1[mask] = b1[mask] = (r[mask] + g[mask] + b[mask]) / 3.

        output = np.empty(np.shape(self.data), dtype=np.float32)
        output[:, :, 0] = r1
        output[:, :, 1] = g1
        output[:, :, 2] = b1

        return np.float32(output)

    def __str__(self):
        return self.name
|
|