Create app.py
app.py
ADDED
@@ -0,0 +1,70 @@
import gradio as gr
import tensorflow as tf
import numpy as np
import cv2
from keras.utils import normalize
from tensorflow.keras import backend as K  # used by dice_coef below
from PIL import Image

def dice_coef(y_true, y_pred):
    smooth = 1e-5
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2.0 * intersection + smooth) / (union + smooth), axis=0)

def predict_segmentation(image):
    original_size = (image.shape[1], image.shape[0])  # (width, height)

    # Resize to the model's input size
    SIZE_X = 128
    SIZE_Y = 128
    img = cv2.resize(image, (SIZE_Y, SIZE_X))

    if len(img.shape) == 3 and img.shape[2] == 3:  # If the image is RGB
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # Gradio supplies RGB, so convert to grayscale accordingly

    img = np.expand_dims(img, axis=2)  # Add the channel dimension
    img = normalize(img, axis=1)
    X_test = np.expand_dims(img, axis=0)  # Add the batch dimension

    custom_objects = {'dice_coef': dice_coef}
    with tf.keras.utils.custom_object_scope(custom_objects):
        model = tf.keras.models.load_model("model100.h5")

    # Get the prediction; argmax yields int64, which cv2.resize rejects, so cast to uint8
    prediction = model.predict(X_test)
    predicted_img = np.argmax(prediction, axis=3)[0, :, :].astype(np.uint8)

    # Resize prediction back to original image size
    predicted_img_resized = cv2.resize(predicted_img, original_size, interpolation=cv2.INTER_NEAREST)

    # Create an RGBA image with a transparent background
    rgba_img = np.zeros((predicted_img_resized.shape[0], predicted_img_resized.shape[1], 4), dtype=np.uint8)

    # Define the color for the segmented area (e.g., red)
    segmented_color = [255, 0, 0]  # Red color in RGB

    # Set the segmented area to the desired color
    for i in range(3):
        rgba_img[:, :, i] = np.where(predicted_img_resized > 0, segmented_color[i], 0)

    # Create an alpha channel: 255 where there is segmentation, 0 otherwise
    rgba_img[:, :, 3] = np.where(predicted_img_resized > 0, 255, 0)

    # Convert the numpy array to an image
    output_image = Image.fromarray(rgba_img)

    # Save the image as PNG to return it
    output_image_path = "/tmp/segmented_output.png"
    output_image.save(output_image_path)

    return output_image_path

# Gradio Interface
iface = gr.Interface(
    fn=predict_segmentation,
    inputs="image",
    outputs="file",  # Return the file path to download the PNG
    live=False
)

iface.launch(share=True)
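
Note that predict_segmentation above reloads model100.h5 from disk on every request. A minimal sketch of loading the model once at startup instead (an assumption about the deployment, not part of the committed file; the helper name run_model is hypothetical) could look like:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

def dice_coef(y_true, y_pred):
    # Same metric as in app.py above; load_model needs it to resolve the saved name.
    smooth = 1e-5
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2.0 * intersection + smooth) / (union + smooth), axis=0)

# Load once at import time; assumes model100.h5 sits in the working directory.
MODEL = tf.keras.models.load_model(
    "model100.h5", custom_objects={"dice_coef": dice_coef}
)

def run_model(batch: np.ndarray) -> np.ndarray:
    # Reuse the already-loaded model on a (1, 128, 128, 1) batch.
    return MODEL.predict(batch)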