Spaces:
Sleeping
Sleeping
Nikhitha2310
committed on
Upload 4 files
Browse files- app.py +116 -0
- requirements.txt +7 -0
- train_15_best.pt +3 -0
- train_17_best.pt +3 -0
app.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from PIL import Image
|
3 |
+
import cv2
|
4 |
+
from ultralytics import YOLO
|
5 |
+
from moviepy import VideoFileClip
|
6 |
+
|
7 |
+
|
8 |
+
# --- UI setup ---------------------------------------------------------------
# Sidebar: file upload plus model / visualization choices.
with st.sidebar:
    st.title("Control panel")
    file = st.file_uploader("Choose an image or a video", type=["png", "jpg", "jpeg", "mp4"])
    radio_button1 = st.radio("Model", ["model_train_17", "model_train_15"])
    radio_button2 = st.radio("Visualize", ["No", "Yes"])

st.header("Palm Tree Detection")
st.write(
    # Fixed broken CSS: the original had "font-size: px;" with no value,
    # which browsers ignore; 16px restores an explicit size.
    '<p style="font-family: Arial, sans-serif; font-size: 16px; color: black; font-style: italic;">Counting the number of palm and coconut trees</p>',
    unsafe_allow_html=True
)

# Placeholder reused for transient "Processing..." status messages.
status_placeholder = st.empty()

# The weight files are committed alongside app.py, so load them by relative
# path; the previous absolute path (C:\Users\...) only worked on the
# author's machine and breaks on any deployment (e.g. HF Spaces).
if radio_button1 == "model_train_17":
    model = YOLO('train_17_best.pt')
elif radio_button1 == "model_train_15":
    model = YOLO('train_15_best.pt')
26 |
+
|
27 |
+
|
28 |
+
|
29 |
+
def count_objects(results, class_names):
    """Tally the detections in *results* by class label.

    results      -- Ultralytics prediction output; only results[0].boxes is read.
    class_names  -- mapping of class index -> label (typically ``model.names``).

    Returns a dict with every known label as a key and its detection count
    as the value (labels with no detections map to 0). Boxes whose class
    index is not in *class_names* are reported via a Streamlit warning.
    """
    tallies = dict.fromkeys(class_names.values(), 0)
    for detection in results[0].boxes:
        idx = int(detection.cls[0])
        label = class_names.get(idx)
        if label:
            tallies[label] += 1
        else:
            st.warning(f"Unknown class index detected: {idx}")
    return tallies
41 |
+
|
42 |
+
|
43 |
+
def run_inference(file):
    """Run the selected YOLO model on an uploaded file and display counts.

    file -- a Streamlit UploadedFile; its MIME type decides the branch.

    Only images are handled today. Relies on module-level globals:
    ``model``, ``status_placeholder``, and ``radio_button2``.
    """
    file_type = file.type.split('/')[0]

    if file_type == 'image':
        image = Image.open(file)
        st.image(image, caption="Uploaded Image", use_container_width=True)
        status_placeholder.write("Processing...Please wait....")
        results = model.predict(source=image, save=False)

        class_names = model.names
        counts = count_objects(results, class_names)
        st.write("Detected objects:")
        for obj, count in counts.items():
            st.write(f"{obj}: {count}")
        status_placeholder.empty()

        # Optionally render the annotated image with bounding boxes.
        if radio_button2 == "Yes":
            status_placeholder.write("Processing...")
            st.image(results[0].plot(), caption="Detected Objects", use_container_width=True)
            status_placeholder.empty()
    else:
        # The uploader accepts mp4, but the video pipeline below is disabled,
        # so tell the user instead of silently doing nothing.
        st.warning("Video processing is not supported yet — please upload an image.")

    # TODO(review): re-enable video support once compression/processing time
    # is acceptable; kept for reference.
    # elif file_type == 'video':
    #     temp_file = f"temp_{file.name}"
    #     compressed_file = f"compressed_{file.name}"

    #     # Save the uploaded video to a temporary file
    #     with open(temp_file, "wb") as f:
    #         f.write(file.getbuffer())

    #     # Compress the video
    #     st.write("Compressing video...")
    #     clip = VideoFileClip(temp_file)
    #     clip.write_videofile(compressed_file, codec="libx264", audio_codec="aac")
    #     clip.close()
    #     st.write("Compression complete. Processing video...")

    #     # Process the compressed video
    #     cap = cv2.VideoCapture(compressed_file)
    #     stframe = st.empty()
    #     total_counts = {name: 0 for name in model.names}

    #     while cap.isOpened():
    #         ret, frame = cap.read()
    #         if not ret:
    #             break

    #         # Perform inference on each video frame
    #         results = model.predict(source=frame, save=False)

    #         # Count the objects in the frame
    #         frame_counts = {model.names[int(box.cls[0])]: 0 for box in results[0].boxes}
    #         for box in results[0].boxes:
    #             class_name = model.names[int(box.cls[0])]
    #             frame_counts[class_name] += 1
    #         for obj, count in frame_counts.items():
    #             total_counts[obj] += count

    #         # Display the processed video frame
    #         stframe.image(results[0].plot(), channels="BGR", use_container_width=True)

    #     cap.release()
    #     st.write("Video processing complete.")

    #     # Display total counts
    #     st.write("Total detected objects in the video:")
    #     for obj, count in total_counts.items():
    #         st.write(f"{obj}: {count}")
# Kick off inference as soon as the user has uploaded a file.
if file is not None:
    run_inference(file)
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit==1.41.1
|
2 |
+
opencv-python==4.10.0.84
|
3 |
+
pillow==11.0.0
|
4 |
+
torch==2.5.1
|
5 |
+
torchvision==0.20.1
|
6 |
+
ultralytics==8.3.51
|
7 |
+
moviepy
|
train_15_best.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3d15cbb17afd7f09af9e68e018179c80c78b2a1a94181d4b3b1fe7a573f52c05
|
3 |
+
size 23001379
|
train_17_best.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:241c458ad8511c167e0a4ac16e8eb9f44687481d3353b0ee2cd803c2c2eab86c
|
3 |
+
size 52513355
|