app.py
ADDED
@@ -0,0 +1,382 @@
import gradio as gr
import cv2
import numpy as np
#from imagesFunctions import *
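
# Assumed runtime dependencies (inferred from the imports above; they are not pinned
# anywhere in this file): gradio, opencv-python, numpy.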

# Define preprocessing functions
def grayscale(image):
    # Gradio supplies images as RGB arrays, so use the RGB conversion code
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return gray_image

def blur(image):
    # Smooth the image with a 15x15 Gaussian kernel
    blurred_image = cv2.GaussianBlur(image, (15, 15), 0)
    return blurred_image

def edge_detection(image):
    # Canny edge detection with fixed thresholds
    edges = cv2.Canny(image, 100, 200)
    return edges

def invert_colors(image):
    inverted_image = cv2.bitwise_not(image)
    return inverted_image

def threshold(image):
    # Fixed binary threshold at 128
    _, thresh_image = cv2.threshold(image, 128, 255, cv2.THRESH_BINARY)
    return thresh_image

def gray_level_transform(image, alpha=1.0, beta=0.0):
    """
    Apply a simple gray level transformation to the image.
    Formula: new_intensity = alpha * old_intensity + beta
    """
    transformed_image = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)
    return transformed_image

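# Worked example (for illustration): with alpha=1.2 and beta=10, a pixel value of 100
# maps to round(1.2 * 100 + 10) = 130; convertScaleAbs also clips results to [0, 255].
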
def negative_transform(image):
    """
    Apply a negative transformation to the image.
    """
    negative_image = 255 - image  # Invert pixel values
    return negative_image

def log_transform(image, c=1):
    """
    Apply a logarithmic transformation to the image.
    """
    log_image = np.log1p(c * image)  # Apply log transformation
    # Scale the values to the range [0, 255]
    log_image = (log_image / np.max(log_image)) * 255
    log_image = np.uint8(log_image)
    return log_image

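# Worked example (assuming c=1 and that the image contains a full-white pixel, so the
# maximum of the log image is log(256)): an input value of 64 maps to
# log(65) / log(256) * 255 ≈ 192, expanding dark tones and compressing bright ones.
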
def power_law_transform(image, gamma=1.0):
    """
    Apply a power law transformation (gamma correction) to the image.
    """
    # Apply gamma correction
    power_law_image = np.power(image / 255.0, gamma)
    # Scale the values back to the range [0, 255]
    power_law_image = np.uint8(power_law_image * 255)
    return power_law_image

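# Worked example: with gamma=0.5 a pixel value of 64 maps to (64/255)**0.5 * 255 ≈ 128,
# so gamma values below 1 brighten dark regions, while values above 1 darken them.
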
def contrast_stretching(image, low=0, high=255):
    """
    Stretch the contrast of an image by mapping pixel values to a new range.

    Args:
        image: A numpy array representing the image.
        low: The minimum value in the output image (default: 0).
        high: The maximum value in the output image (default: 255).

    Returns:
        A numpy array representing the contrast-stretched image.
    """
    # Find the minimum and maximum values in the image
    min_val = np.amin(image)
    max_val = np.amax(image)

    # Check if min and max are the same (no stretch needed)
    if min_val == max_val:
        return image

    # Normalize the pixel values to the range [0, 1]
    normalized = (image - min_val) / (max_val - min_val)

    # Stretch the normalized values to the new range [low, high]
    stretched = normalized * (high - low) + low

    # Convert back to uint8; the values already lie in [low, high]
    return np.uint8(stretched)

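# Worked example: if the input only spans [50, 200], a pixel value of 125 is normalized to
# 0.5 and stretched to 0.5 * (255 - 0) + 0 ≈ 127 with the default low/high, using the full range.
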
def intensity_slicing(image, threshold=128):
    """
    Perform intensity slicing on an image to create a binary image.

    Args:
        image: A numpy array representing the image.
        threshold: The intensity threshold for binarization (default: 128).

    Returns:
        A numpy array representing the binary image after intensity slicing.
    """
    # Create a copy of the image to avoid modifying the original
    sliced_image = image.copy()

    # Apply thresholding
    sliced_image[sliced_image > threshold] = 255  # Set values above threshold to white (255)
    sliced_image[sliced_image <= threshold] = 0   # Set values below or equal to threshold to black (0)

    return sliced_image

def histogram_equalization(image):
    """
    Perform histogram equalization on an image to enhance its contrast.

    Args:
        image: A numpy array representing the image.

    Returns:
        A numpy array representing the image after histogram equalization.
    """
    # Compute histogram of the input image
    hist, _ = np.histogram(image.flatten(), bins=256, range=(0, 256))

    # Compute cumulative distribution function (CDF)
    cdf = hist.cumsum()

    # Normalize the CDF so that the mapped intensities span [0, 255]
    cdf_normalized = cdf * 255.0 / cdf.max()

    # Map each pixel value through the normalized CDF
    equalized_image = np.interp(image.flatten(), range(256), cdf_normalized).reshape(image.shape)

    return equalized_image.astype(np.uint8)

def mean_filter(image, kernel_size=3):
    """
    Apply a mean filter (averaging filter) to the image.

    Args:
        image: A numpy array representing the input image.
        kernel_size: The size of the square kernel (default: 3).

    Returns:
        A numpy array representing the image after applying the mean filter.
    """
    # Define the averaging kernel
    kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)

    # Apply convolution with the kernel using OpenCV's filter2D function
    filtered_image = cv2.filter2D(image, -1, kernel)

    return filtered_image

def gaussian_filter(image, kernel_size=3, sigma=1):
    """
    Apply a Gaussian filter to the image.

    Args:
        image: A numpy array representing the input image.
        kernel_size: The size of the square kernel (default: 3).
        sigma: The standard deviation of the Gaussian distribution (default: 1).

    Returns:
        A numpy array representing the image after applying the Gaussian filter.
    """
    # Build a 2D Gaussian kernel from the separable 1D kernel
    kernel = cv2.getGaussianKernel(kernel_size, sigma)
    kernel = np.outer(kernel, kernel.transpose())

    # Apply convolution with the kernel using OpenCV's filter2D function
    filtered_image = cv2.filter2D(image, -1, kernel)

    return filtered_image

def sobel_filter(image):
    """
    Apply the Sobel filter to the image for edge detection.

    Args:
        image: A numpy array representing the input image.

    Returns:
        A numpy array representing the image after applying the Sobel filter.
    """
    # Apply Sobel filter for the horizontal gradient
    sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=3)

    # Apply Sobel filter for the vertical gradient
    sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=3)

    # Combine horizontal and vertical gradients to get the gradient magnitude
    gradient_magnitude = np.sqrt(sobel_x**2 + sobel_y**2)

    # Normalize the gradient magnitude to the range [0, 255]
    gradient_magnitude = cv2.normalize(gradient_magnitude, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)

    return gradient_magnitude

def convert_to_grayscale(image):
    """
    Converts an image to grayscale.

    Args:
        image: A NumPy array representing the image (RGB format, as provided by Gradio).

    Returns:
        A NumPy array representing the grayscale image.
    """
    # Check if image is already grayscale
    if len(image.shape) == 2:
        return image  # Already grayscale

    # Convert the image to grayscale using OpenCV's RGB2GRAY conversion
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    return gray_image

def laplacian_filter(image):
    """
    Apply the Laplacian filter to the grayscale image for edge detection.

    Args:
        image: A numpy array representing the input image.

    Returns:
        A numpy array representing the image after applying the Laplacian filter.
    """
    # Convert the input image to grayscale (Gradio supplies RGB arrays)
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # Apply Laplacian filter using OpenCV's Laplacian function
    laplacian = cv2.Laplacian(gray_image, cv2.CV_64F)

    # Convert the output to uint8 and scale to [0, 255]
    laplacian = cv2.normalize(laplacian, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)

    return laplacian

def min_max_filter(image, kernel_size=3, mode='min'):
    """
    Apply the min-max filter to the image.

    Args:
        image: A numpy array representing the input image.
        kernel_size: The size of the square kernel (default: 3).
        mode: The mode of the filter ('min' or 'max') (default: 'min').

    Returns:
        A numpy array representing the image after applying the min-max filter.
    """
    # Define the structuring element (OpenCV expects a uint8 kernel)
    kernel = np.ones((kernel_size, kernel_size), np.uint8)

    # Apply minimum (erosion) or maximum (dilation) filter
    if mode == 'min':
        filtered_image = cv2.erode(image, kernel)
    elif mode == 'max':
        filtered_image = cv2.dilate(image, kernel)
    else:
        raise ValueError("Invalid mode. Mode must be 'min' or 'max'.")

    return filtered_image

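# Note: mode='min' is a morphological erosion (local minimum), which shrinks bright regions
# and suppresses bright "salt" noise; mode='max' is a dilation (local maximum) and does the
# opposite. The UI below calls this filter with the default mode='min'.
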
def median_filter(image, kernel_size=3):
    """
    Apply the median filter to the image.

    Args:
        image: A numpy array representing the input image.
        kernel_size: The size of the square kernel (default: 3).

    Returns:
        A numpy array representing the image after applying the median filter.
    """
    # Ensure that kernel_size is odd (medianBlur requires an odd kernel size)
    if kernel_size % 2 == 0:
        kernel_size += 1

    # Apply median filter using OpenCV's medianBlur function
    filtered_image = cv2.medianBlur(image, kernel_size)

    return filtered_image

# Define preprocessing function choices (must be before using in Dropdown)
preprocessing_functions = [
    ("Grayscale", grayscale),
    ("Blur", blur),
    ("Edge Detection", edge_detection),
    ("Invert Colors", invert_colors),
    ("Threshold", threshold),
    ("Gray Level Transform", gray_level_transform),
    ("Negative Transform", negative_transform),
    ("Log Transform", log_transform),
    ("Power Law Transform", power_law_transform),
    ("Contrast Stretching", contrast_stretching),
    ("intensity slicing", intensity_slicing),
    ("histogram equalization", histogram_equalization),
    ("mean filter", mean_filter),
    ("gaussian filter", gaussian_filter),
    ("sobel filter", sobel_filter),
    ("laplacian filter", laplacian_filter),
    ("min max filter", min_max_filter),
    ("median filter", median_filter),
]

input_image = gr.components.Image(label="Upload Image")
function_selector = gr.components.Dropdown(choices=[func[0] for func in preprocessing_functions], label="Select Preprocessing Function")

# Slider for alpha (multiplicative gain in the gray level transform)
alpha_slider = gr.components.Slider(minimum=0.1, maximum=3.0, value=1.0, label="alpha")

# Slider for beta (additive offset in the gray level transform)
beta_slider = gr.components.Slider(minimum=-100, maximum=100, value=0, label="beta")

# Slider for c_log (scale factor of the log transform)
c_log_slider = gr.components.Slider(minimum=0.1, maximum=3.0, value=1.0, label="c_log")

# Slider for gamma (exponent of the power law transform)
gamma_slider = gr.components.Slider(minimum=0.1, maximum=3.0, value=1.0, label="gamma")

# Slider for the intensity slicing threshold
slicing_threshold_slider = gr.components.Slider(minimum=0, maximum=255, value=125, label="slicing threshold")

# Slider for the filter kernel size
kernel_size_slider = gr.components.Slider(minimum=2, maximum=5, step=1, value=3, label="kernel size")

# Slider for the Gaussian filter sigma
sigma_slider = gr.components.Slider(minimum=1, maximum=5, value=1, label="sigma")

def apply_preprocessing(image, selected_function, alpha, beta, c_log, gamma, slicing_threshold, kernel_size, sigma):
    # Find the actual function based on its user-friendly name
    selected_function_obj = None
    for func_name, func_obj in preprocessing_functions:
        if func_name == selected_function:
            selected_function_obj = func_obj
            break
    if selected_function_obj is None:
        raise ValueError("Selected function not found.")

    # Slider values arrive as floats; kernel sizes must be integers
    kernel_size = int(kernel_size)

    # Pass the extra parameters that the selected function expects
    if selected_function == "Gray Level Transform":
        processed_image = selected_function_obj(image, alpha=alpha, beta=beta)
    elif selected_function == "Log Transform":
        processed_image = selected_function_obj(image, c=c_log)
    elif selected_function == "Power Law Transform":
        processed_image = selected_function_obj(image, gamma=gamma)
    elif selected_function == "intensity slicing":
        processed_image = selected_function_obj(image, threshold=slicing_threshold)
    elif selected_function == "mean filter":
        processed_image = selected_function_obj(image, kernel_size=kernel_size)
    elif selected_function == "gaussian filter":
        processed_image = selected_function_obj(image, kernel_size=kernel_size, sigma=sigma)
    elif selected_function == "min max filter":
        processed_image = selected_function_obj(image, kernel_size=kernel_size)
    elif selected_function == "median filter":
        processed_image = selected_function_obj(image, kernel_size=kernel_size)
    else:
        # Remaining functions take only the image
        processed_image = selected_function_obj(image)
    return processed_image

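# Optional smoke test outside the UI (hypothetical values, not part of the app): something like
#   apply_preprocessing(np.zeros((100, 100, 3), dtype=np.uint8), "Edge Detection",
#                       alpha=1.0, beta=0, c_log=1.0, gamma=1.0,
#                       slicing_threshold=128, kernel_size=3, sigma=1)
# should return an all-black edge map with the same height and width as the input.
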
output_image = gr.components.Image(label="Processed Image")

# Create Gradio interface
gr.Interface(
    fn=apply_preprocessing,
    inputs=[input_image, function_selector, alpha_slider, beta_slider, c_log_slider, gamma_slider, slicing_threshold_slider, kernel_size_slider, sigma_slider],
    outputs=output_image,
    title="Elza3ama studio",
    description="Upload an image and select a preprocessing function."
).launch()
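
# Running this file (e.g. `python app.py`) starts a local Gradio server and prints its local URL;
# launch(share=True) would additionally create a temporary public link if one is needed.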