Spaces:
Sleeping
Sleeping
Nikhitha2310
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,116 +1,115 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
from PIL import Image
|
3 |
-
import cv2
|
4 |
-
from ultralytics import YOLO
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
st.
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
st.
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
image = Image
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
#
|
67 |
-
#
|
68 |
-
|
69 |
-
|
70 |
-
#
|
71 |
-
#
|
72 |
-
|
73 |
-
|
74 |
-
#
|
75 |
-
#
|
76 |
-
# clip =
|
77 |
-
# clip.
|
78 |
-
#
|
79 |
-
|
80 |
-
|
81 |
-
#
|
82 |
-
#
|
83 |
-
#
|
84 |
-
|
85 |
-
|
86 |
-
#
|
87 |
-
#
|
88 |
-
#
|
89 |
-
|
90 |
-
|
91 |
-
#
|
92 |
-
|
93 |
-
|
94 |
-
#
|
95 |
-
#
|
96 |
-
#
|
97 |
-
# class_name
|
98 |
-
#
|
99 |
-
#
|
100 |
-
|
101 |
-
|
102 |
-
#
|
103 |
-
|
104 |
-
|
105 |
-
#
|
106 |
-
|
107 |
-
|
108 |
-
#
|
109 |
-
#
|
110 |
-
#
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
run_inference(file)
|
|
|
1 |
+
import streamlit as st
from PIL import Image
import cv2
from ultralytics import YOLO


# --- Sidebar: user controls (input file, model choice, visualization toggle) ---
with st.sidebar:
    st.title("Control panel")
    file = st.file_uploader("Choose an image or a video", type=["png", "jpg", "jpeg", "mp4"])
    radio_button1 = st.radio("Model", ["model_train_17", "model_train_15"])
    radio_button2 = st.radio("Visualize", ["No", "Yes"])

st.header("Palm Tree Detection")
# Subtitle rendered as raw HTML; original had the invalid CSS "font-size: px"
# (missing value) — fixed to an explicit 16px.
st.write(
    '<p style="font-family: Arial, sans-serif; font-size: 16px; color: black; font-style: italic;">Counting the number of palm and coconut trees</p>',
    unsafe_allow_html=True
)

# Placeholder reused for transient "Processing..." status messages.
status_placeholder = st.empty()

# Load the YOLO weights matching the sidebar selection.
# NOTE(review): hard-coded absolute Windows paths — consider moving these to a
# config file or environment variable so the app runs outside this one machine.
if radio_button1 == "model_train_17":
    model = YOLO(r'C:\Users\Tectoro\Desktop\Palm tree detection\train_17_best.pt')
elif radio_button1 == "model_train_15":
    model = YOLO(r'C:\Users\Tectoro\Desktop\Palm tree detection\train_15_best.pt')
|
24 |
+
|
25 |
+
|
26 |
+
|
27 |
+
|
28 |
+
def count_objects(results, class_names):
    """Tally the detections in *results* per class name.

    Args:
        results: YOLO prediction output; only ``results[0].boxes`` is read.
        class_names: mapping of class index -> class name (``model.names``).

    Returns:
        dict mapping every known class name to its detection count (classes
        with no detections are present with count 0).
    """
    tally = dict.fromkeys(class_names.values(), 0)
    for detection in results[0].boxes:
        idx = int(detection.cls[0])
        label = class_names.get(idx, None)
        if not label:
            # Index not present in the mapping — surface it to the user.
            st.warning(f"Unknown class index detected: {idx}")
        else:
            tally[label] += 1
    return tally
|
40 |
+
|
41 |
+
|
42 |
+
def run_inference(file):
    """Run the selected YOLO model on an uploaded file and render the results.

    Reads the module-level ``model``, ``status_placeholder`` and
    ``radio_button2`` configured by the sidebar above.

    Args:
        file: a Streamlit ``UploadedFile``; its MIME type decides the branch.
    """
    # MIME major type, e.g. "image/png" -> "image", "video/mp4" -> "video".
    file_type = file.type.split('/')[0]

    if file_type == 'image':
        image = Image.open(file)
        st.image(image, caption="Uploaded Image", use_container_width=True)
        status_placeholder.write("Processing...Please wait....")
        results = model.predict(source=image, save=False)

        # Per-class detection summary.
        class_names = model.names
        counts = count_objects(results, class_names)
        st.write("Detected objects:")
        for obj, count in counts.items():
            st.write(f"{obj}: {count}")
        status_placeholder.empty()

        # Optionally render the annotated image.
        if radio_button2 == "Yes":
            status_placeholder.write("Processing...")
            st.image(results[0].plot(), caption="Detected Objects", use_container_width=True)
            status_placeholder.empty()
    else:
        # The uploader accepts mp4, but the video pipeline below is disabled;
        # previously this silently did nothing for videos.
        st.warning("Video processing is currently disabled; please upload an image.")

    # elif file_type == 'video':
    #     temp_file = f"temp_{file.name}"
    #     compressed_file = f"compressed_{file.name}"

    #     # Save the uploaded video to a temporary file
    #     with open(temp_file, "wb") as f:
    #         f.write(file.getbuffer())

    #     # Compress the video
    #     st.write("Compressing video...")
    #     clip = VideoFileClip(temp_file)
    #     clip.write_videofile(compressed_file, codec="libx264", audio_codec="aac")
    #     clip.close()
    #     st.write("Compression complete. Processing video...")

    #     # Process the compressed video
    #     cap = cv2.VideoCapture(compressed_file)
    #     stframe = st.empty()
    #     total_counts = {name: 0 for name in model.names}

    #     while cap.isOpened():
    #         ret, frame = cap.read()
    #         if not ret:
    #             break

    #         # Perform inference on each video frame
    #         results = model.predict(source=frame, save=False)

    #         # Count the objects in the frame
    #         frame_counts = {model.names[int(box.cls[0])]: 0 for box in results[0].boxes}
    #         for box in results[0].boxes:
    #             class_name = model.names[int(box.cls[0])]
    #             frame_counts[class_name] += 1
    #         for obj, count in frame_counts.items():
    #             total_counts[obj] += count

    #         # Display the processed video frame
    #         stframe.image(results[0].plot(), channels="BGR", use_container_width=True)

    #     cap.release()
    #     st.write("Video processing complete.")

    #     # Display total counts
    #     st.write("Total detected objects in the video:")
    #     for obj, count in total_counts.items():
    #         st.write(f"{obj}: {count}")
|
111 |
+
|
112 |
+
|
113 |
+
|
114 |
+
# Entry point: run inference only once the user has actually uploaded a file
# (file_uploader returns None until then).
if file is not None:
    run_inference(file)
|
|