Nikhitha2310 committed on
Commit
f29fe4a
·
verified ·
1 Parent(s): a7218e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +115 -116
app.py CHANGED
@@ -1,116 +1,115 @@
1
- import streamlit as st
2
- from PIL import Image
3
- import cv2
4
- from ultralytics import YOLO
5
- from moviepy import VideoFileClip
6
-
7
-
8
- with st.sidebar:
9
- st.title("Control panel")
10
- file = st.file_uploader("Choose an image or a video", type=["png", "jpg", "jpeg", "mp4"])
11
- radio_button1 = st.radio("Model", ["model_train_17", "model_train_15"])
12
- radio_button2=st.radio("Visualize",["No","Yes"])
13
-
14
- st.header("Palm Tree Detection")
15
- st.write(
16
- '<p style="font-family: Arial, sans-serif; font-size: px; color: black; font-style: italic;">Counting the number of palm and coconut trees</p>',
17
- unsafe_allow_html=True
18
- )
19
-
20
- status_placeholder = st.empty()
21
- if radio_button1 == "model_train_17":
22
- model = YOLO(r'C:\Users\Tectoro\Desktop\Palm tree detection\train_17_best.pt')
23
- elif radio_button1 == "model_train_15":
24
- model = YOLO(r'C:\Users\Tectoro\Desktop\Palm tree detection\train_15_best.pt')
25
-
26
-
27
-
28
-
29
- def count_objects(results, class_names):
30
- """Count objects detected for each class."""
31
- class_counts = {name: 0 for name in class_names.values()}
32
- for box in results[0].boxes:
33
- cls_idx = int(box.cls[0])
34
- class_name = class_names.get(cls_idx, None)
35
-
36
- if class_name:
37
- class_counts[class_name] += 1
38
- else:
39
- st.warning(f"Unknown class index detected: {cls_idx}")
40
- return class_counts
41
-
42
-
43
- def run_inference(file):
44
- file_type = file.type.split('/')[0]
45
-
46
- if file_type == 'image':
47
- image = Image.open(file)
48
- st.image(image, caption="Uploaded Image", use_container_width=True)
49
- status_placeholder.write("Processing...Please wait....")
50
- results = model.predict(source=image, save=False)
51
-
52
- class_names = model.names
53
- counts = count_objects(results, class_names)
54
- st.write("Detected objects:")
55
- for obj, count in counts.items():
56
- st.write(f"{obj}: {count}")
57
- status_placeholder.empty()
58
-
59
- if(radio_button2=="Yes"):
60
- status_placeholder.write("Processing...")
61
- st.image(results[0].plot(), caption="Detected Objects", use_container_width=True)
62
- status_placeholder.empty()
63
-
64
-
65
-
66
- # elif file_type == 'video':
67
- # temp_file = f"temp_{file.name}"
68
- # compressed_file = f"compressed_{file.name}"
69
-
70
- # # Save the uploaded video to a temporary file
71
- # with open(temp_file, "wb") as f:
72
- # f.write(file.getbuffer())
73
-
74
- # # Compress the video
75
- # st.write("Compressing video...")
76
- # clip = VideoFileClip(temp_file)
77
- # clip.write_videofile(compressed_file, codec="libx264", audio_codec="aac")
78
- # clip.close()
79
- # st.write("Compression complete. Processing video...")
80
-
81
- # # Process the compressed video
82
- # cap = cv2.VideoCapture(compressed_file)
83
- # stframe = st.empty()
84
- # total_counts = {name: 0 for name in model.names}
85
-
86
- # while cap.isOpened():
87
- # ret, frame = cap.read()
88
- # if not ret:
89
- # break
90
-
91
- # # Perform inference on each video frame
92
- # results = model.predict(source=frame, save=False)
93
-
94
- # # Count the objects in the frame
95
- # frame_counts = {model.names[int(box.cls[0])]: 0 for box in results[0].boxes}
96
- # for box in results[0].boxes:
97
- # class_name = model.names[int(box.cls[0])]
98
- # frame_counts[class_name] += 1
99
- # for obj, count in frame_counts.items():
100
- # total_counts[obj] += count
101
-
102
- # # Display the processed video frame
103
- # stframe.image(results[0].plot(), channels="BGR", use_container_width=True)
104
-
105
- # cap.release()
106
- # st.write("Video processing complete.")
107
-
108
- # # Display total counts
109
- # st.write("Total detected objects in the video:")
110
- # for obj, count in total_counts.items():
111
- # st.write(f"{obj}: {count}")
112
-
113
-
114
-
115
- if file is not None:
116
- run_inference(file)
 
1
+ import streamlit as st
2
+ from PIL import Image
3
+ import cv2
4
+ from ultralytics import YOLO
5
+
6
+
7
# --- Sidebar controls -------------------------------------------------------
with st.sidebar:
    st.title("Control panel")
    # Single uploader accepts both media kinds; note that videos are
    # currently ignored downstream (the video branch is disabled in
    # run_inference), so only the image types are actually processed.
    file = st.file_uploader("Choose an image or a video", type=["png", "jpg", "jpeg", "mp4"])
    radio_button1 = st.radio("Model", ["model_train_17", "model_train_15"])
    radio_button2 = st.radio("Visualize", ["No", "Yes"])

# --- Page header ------------------------------------------------------------
st.header("Palm Tree Detection")
st.write(
    # BUG FIX: the original style declared "font-size: px" (no value), which
    # is invalid CSS and silently ignored by browsers; give it a real size.
    '<p style="font-family: Arial, sans-serif; font-size: 16px; color: black; font-style: italic;">Counting the number of palm and coconut trees</p>',
    unsafe_allow_html=True
)

# Placeholder reused by run_inference() for transient "Processing..." status.
status_placeholder = st.empty()

# Map the sidebar label to its weight file instead of an if/elif chain.
# NOTE(review): these absolute Windows paths will break on any other
# machine — consider shipping the weights with the app and using paths
# relative to the script. Behavior here is unchanged for the two labels
# the radio widget can produce.
_MODEL_WEIGHTS = {
    "model_train_17": r'C:\Users\Tectoro\Desktop\Palm tree detection\train_17_best.pt',
    "model_train_15": r'C:\Users\Tectoro\Desktop\Palm tree detection\train_15_best.pt',
}
model = YOLO(_MODEL_WEIGHTS[radio_button1])
28
def count_objects(results, class_names):
    """Tally how many detections of each class appear in the first result.

    Args:
        results: Ultralytics prediction output; only ``results[0].boxes``
            is inspected.
        class_names: mapping of class index -> class name (``model.names``).

    Returns:
        dict mapping every class name to its detection count (0 when the
        class was not detected at all).
    """
    # Pre-seed every known class with zero so absent classes still appear.
    tally = dict.fromkeys(class_names.values(), 0)
    for detection in results[0].boxes:
        cls_idx = int(detection.cls[0])
        label = class_names.get(cls_idx)
        if not label:
            # Index not present in the model's class map — surface it in
            # the UI rather than crashing on a missing key.
            st.warning(f"Unknown class index detected: {cls_idx}")
            continue
        tally[label] += 1
    return tally
40
+
41
+
42
def run_inference(file):
    """Run YOLO detection on an uploaded file and render results in Streamlit.

    Only images are processed. The sidebar uploader also accepts ``.mp4``,
    but video inference is currently disabled, so a video upload now shows
    a warning instead of the previous silent no-op (BUG FIX: uploading a
    video produced no output at all).

    Relies on module-level globals set up by the script body:
    ``model`` (YOLO instance), ``status_placeholder`` (st.empty placeholder)
    and ``radio_button2`` ("Yes"/"No" visualization toggle).

    Args:
        file: Streamlit ``UploadedFile`` from ``st.file_uploader``.
    """
    # MIME type like "image/png" or "video/mp4" — keep only the major kind.
    file_type = file.type.split('/')[0]

    if file_type == 'image':
        image = Image.open(file)
        st.image(image, caption="Uploaded Image", use_container_width=True)
        status_placeholder.write("Processing...Please wait....")
        results = model.predict(source=image, save=False)

        class_names = model.names
        counts = count_objects(results, class_names)
        st.write("Detected objects:")
        for obj, count in counts.items():
            st.write(f"{obj}: {count}")
        status_placeholder.empty()

        # Optional annotated-image rendering, gated by the sidebar toggle.
        if radio_button2 == "Yes":
            status_placeholder.write("Processing...")
            st.image(results[0].plot(), caption="Detected Objects", use_container_width=True)
            status_placeholder.empty()
    else:
        # Video support (frame-by-frame cv2/moviepy pipeline) was removed;
        # tell the user instead of silently ignoring the upload.
        st.warning("Video processing is currently disabled. Please upload an image.")
114
# Kick off inference as soon as the sidebar uploader has provided a file;
# Streamlit re-runs the whole script on every widget interaction.
if file is not None:
    run_inference(file)