import streamlit as st
from PIL import Image
import cv2
from ultralytics import YOLO
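# Sidebar controls: file upload, model choice, and visualization toggle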
with st.sidebar:
    st.title("Control panel")
    file = st.file_uploader("Choose an image or a video", type=["png", "jpg", "jpeg", "mp4"])
    radio_button1 = st.radio("Model", ["model_train_17", "model_train_15"])
    radio_button2 = st.radio("Visualize", ["No", "Yes"])
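# Page header and short description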
st.header("Palm Tree Detection")
st.write(
    '<p style="font-family: Arial, sans-serif; color: black; font-style: italic;">Counting the number of palm and coconut trees</p>',
    unsafe_allow_html=True
)
status_placeholder = st.empty()
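# Load the selected YOLO weights (paths are local to the original author's machine)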
if radio_button1 == "model_train_17":
model = YOLO(r'C:\Users\Tectoro\Desktop\Palm tree detection\train_17_best.pt')
elif radio_button1 == "model_train_15":
model = YOLO(r'C:\Users\Tectoro\Desktop\Palm tree detection\train_15_best.pt')
def count_objects(results, class_names):
    """Count objects detected for each class."""
    class_counts = {name: 0 for name in class_names.values()}
    for box in results[0].boxes:
        cls_idx = int(box.cls[0])
        class_name = class_names.get(cls_idx, None)
        if class_name:
            class_counts[class_name] += 1
        else:
            st.warning(f"Unknown class index detected: {cls_idx}")
    return class_counts
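# Run detection on the uploaded file, report per-class counts, and optionally show the annotated image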
def run_inference(file):
    file_type = file.type.split('/')[0]
    if file_type == 'image':
        image = Image.open(file)
        st.image(image, caption="Uploaded Image", use_container_width=True)
        status_placeholder.write("Processing... Please wait...")
        results = model.predict(source=image, save=False)
        class_names = model.names
        counts = count_objects(results, class_names)
        st.write("Detected objects:")
        for obj, count in counts.items():
            st.write(f"{obj}: {count}")
        status_placeholder.empty()
        if radio_button2 == "Yes":
            status_placeholder.write("Processing...")
            st.image(results[0].plot(), caption="Detected Objects", use_container_width=True)
            status_placeholder.empty()
    # elif file_type == 'video':
    #     # Requires: from moviepy.editor import VideoFileClip
    #     temp_file = f"temp_{file.name}"
    #     compressed_file = f"compressed_{file.name}"
    #     # Save the uploaded video to a temporary file
    #     with open(temp_file, "wb") as f:
    #         f.write(file.getbuffer())
    #     # Compress the video
    #     st.write("Compressing video...")
    #     clip = VideoFileClip(temp_file)
    #     clip.write_videofile(compressed_file, codec="libx264", audio_codec="aac")
    #     clip.close()
    #     st.write("Compression complete. Processing video...")
    #     # Process the compressed video
    #     cap = cv2.VideoCapture(compressed_file)
    #     stframe = st.empty()
    #     total_counts = {name: 0 for name in model.names.values()}
    #     while cap.isOpened():
    #         ret, frame = cap.read()
    #         if not ret:
    #             break
    #         # Perform inference on each video frame
    #         results = model.predict(source=frame, save=False)
    #         # Count the objects in the frame
    #         frame_counts = {model.names[int(box.cls[0])]: 0 for box in results[0].boxes}
    #         for box in results[0].boxes:
    #             class_name = model.names[int(box.cls[0])]
    #             frame_counts[class_name] += 1
    #         for obj, count in frame_counts.items():
    #             total_counts[obj] += count
    #         # Display the processed video frame
    #         stframe.image(results[0].plot(), channels="BGR", use_container_width=True)
    #     cap.release()
    #     st.write("Video processing complete.")
    #     # Display total counts
    #     st.write("Total detected objects in the video:")
    #     for obj, count in total_counts.items():
    #         st.write(f"{obj}: {count}")
if file is not None:
    run_inference(file)