revert to vision-agent code
app.py
CHANGED
@@ -4,11 +4,8 @@ import gradio as gr
 from typing import *
 from pillow_heif import register_heif_opener
 register_heif_opener()
-from PIL import Image
-import numpy as np
 import vision_agent as va
-from vision_agent.tools import
-
+from vision_agent.tools import register_tool
 from vision_agent.tools import load_image, owl_v2, overlay_bounding_boxes, save_image
 
 from huggingface_hub import login
@@ -23,7 +20,7 @@ def detect_brain_tumor(image, debug: bool = False) -> str:
     Detects a brain tumor in the given image and saves the image with bounding boxes.
 
     Parameters:
-        image: The input image (
+        image: The input image (as provided by Gradio).
         debug (bool): Flag to enable logging for debugging purposes.
 
     Returns:
@@ -32,28 +29,12 @@ def detect_brain_tumor(image, debug: bool = False) -> str:
     # Generate a unique output filename
     output_path = f"./output/tumor_detection_{int(time.time())}.jpg"
 
-    # Ensure image is in the correct format
-    if isinstance(image, str):
-        # If image is a file path
-        image = Image.open(image)
-    elif isinstance(image, np.ndarray):
-        # If image is already a numpy array
-        image = Image.fromarray(image)
-    elif not isinstance(image, Image.Image):
-        raise ValueError("Unsupported image type. Please provide a PIL Image, numpy array, or file path.")
-
-    # Convert to RGB if it's not
-    image = image.convert('RGB')
-
-    # Convert PIL Image to numpy array for owl_v2
-    image_array = np.array(image)
-
     if debug:
-        print(f"Image
+        print(f"Image received")
 
     # Step 2: Detect brain tumor using owl_v2
     prompt = "detect brain tumor"
-    detections = owl_v2(prompt,
+    detections = owl_v2(prompt, image)
     if debug:
         print(f"Detections: {detections}")
 
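Note: this hunk drops the PIL preprocessing entirely and hands the Gradio image to owl_v2 as-is. The unchanged middle of detect_brain_tumor (between the detection call and `return output_path`) is not shown in the diff. For orientation only, here is a minimal sketch of how the helpers imported at the top of app.py are typically chained; the dict keys returned by owl_v2 and the save_image signature are assumptions based on the vision_agent.tools documentation, not something this diff confirms.

# Sketch only -- the real, unchanged body of detect_brain_tumor is not shown in this diff.
from vision_agent.tools import overlay_bounding_boxes, save_image

def annotate_and_save(image, detections, output_path, debug=False):
    # Assumed: owl_v2 returns a list of dicts like {"label": ..., "score": ..., "bbox": [...]},
    # and overlay_bounding_boxes accepts the numpy image plus that list.
    annotated = overlay_bounding_boxes(image, detections)
    if debug:
        print(f"Saving annotated image to {output_path}")
    # Assumed signature: save_image(image, file_path)
    save_image(annotated, output_path)
    return output_path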
@@ -69,11 +50,6 @@ def detect_brain_tumor(image, debug: bool = False) -> str:
 
     return output_path
 
-# Example usage (uncomment to run):
-# detect_brain_tumor("/content/drive/MyDrive/kaggle/datasets/brain-tumor-image-dataset-semantic-segmentation_old/train_categories/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", "/content/drive/MyDrive/kaggle/datasets/brain-tumor-image-dataset-semantic-segmentation_old/output/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", debug=True)
-
-#########
-
 INTRO_TEXT="# 🔬🧠 CellVision AI -- Intelligent Cell Imaging Analysis 🤖🧫"
 IMAGE_PROMPT="Are these cells healthy or cancerous?"
 
@@ -82,7 +58,7 @@ with gr.Blocks(css="style.css") as demo:
     with gr.Tab("Agentic Detection"):
         with gr.Row():
             with gr.Column():
-                image = gr.Image(type="
+                image = gr.Image(type="numpy")
             with gr.Column():
                 text_input = gr.Text(label="Input Text")
                 text_output = gr.Text(label="Text Output")
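Note: with `type="numpy"`, Gradio delivers the uploaded picture to the callback as an RGB uint8 numpy array, which is exactly what the reverted detect_brain_tumor now expects. The event wiring itself sits in unchanged lines the diff does not show; the following is only a hedged sketch of what that wiring typically looks like, with the button name invented for illustration.

# Sketch only -- the actual click wiring is in unchanged lines not shown here.
detect_btn = gr.Button("Detect")      # hypothetical component name
detect_btn.click(
    fn=detect_brain_tumor,            # defined earlier in app.py
    inputs=[image],                   # the gr.Image(type="numpy") component above
    outputs=[text_output],            # detect_brain_tumor returns the saved file path
)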
@@ -100,21 +76,19 @@
             outputs=chat_outputs,
         )
 
-        examples = [["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"
-                    ["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg"
-                    ["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg"
-                    ["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg"
-                    ["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg"
-                    ["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg"
-                    ["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg"
-                    ["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg"
+        examples = [["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"],
+                    ["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg"],
+                    ["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg"],
+                    ["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg"],
+                    ["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg"],
+                    ["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg"],
+                    ["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg"],
+                    ["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg"],
         ]
         gr.Examples(
             examples=examples,
             inputs=chat_inputs,
         )
 
-        #########
-
 if __name__ == "__main__":
     demo.queue(max_size=10).launch(debug=True)
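Note: the examples hunk follows the gr.Examples contract, where each row is a list with one value per component in `inputs`. What chat_inputs actually contains is defined in lines the diff does not show; the sketch below simply assumes it is the single image component.

# Sketch only -- chat_inputs' real composition is not visible in this diff.
chat_inputs = [image]                                                         # assumed: just the image component
examples = [["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"]]   # one value per input component
gr.Examples(examples=examples, inputs=chat_inputs)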