luminoussg committed
Commit 4e37a98 · verified · 1 Parent(s): c603f98

Create app.py

Files changed (1):
app.py +224 -0
app.py ADDED
@@ -0,0 +1,224 @@
+ import gradio as gr
+ import cv2
+ import torch
+ import time
+ import numpy as np
+ from ultralytics import YOLO
+ import os
+
+ # Optimize CPU usage
+ torch.set_num_threads(8)
+ MODEL_DIR = "models"
+
+ stop_processing = False  # Global flag to stop processing
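+ # NOTE: this module-level flag is shared by every concurrent Gradio session;
+ # it is fine for a single-user demo, but a multi-user deployment would need
+ # per-session state instead.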
+
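+ # Assumed layout (not spelled out in the commit): models/<model_name>/weights/<file>.pt,
+ # so each dropdown entry is named after the folder one level above the directory
+ # that holds the .pt file.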
+ def get_model_options():
+     models = {}
+     for root, dirs, files in os.walk(MODEL_DIR):
+         for file in files:
+             if file.endswith(".pt"):
+                 model_name = os.path.basename(os.path.dirname(root))
+                 models[model_name] = os.path.join(root, file)
+     return models
+
+ model_options = get_model_options()
+
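+ # Draws one green box and class label per detection; boxes come back as
+ # (x1, y1, x2, y2) tensors, already on CPU since the model is loaded on CPU.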
+ def annotate_frame(frame, results):
+     for box in results[0].boxes:
+         xyxy = box.xyxy[0].numpy()
+         class_id = int(box.cls[0].item())
+         label = results[0].names[class_id]
+
+         start_point = (int(xyxy[0]), int(xyxy[1]))
+         end_point = (int(xyxy[2]), int(xyxy[3]))
+         color = (0, 255, 0)
+         thickness = 2
+         cv2.rectangle(frame, start_point, end_point, color, thickness)
+
+         font = cv2.FONT_HERSHEY_SIMPLEX
+         font_scale = 0.5
+         font_thickness = 1
+         label_position = (int(xyxy[0]), int(xyxy[1] - 10))
+         cv2.putText(frame, label, label_position, font, font_scale, color, font_thickness)
+     return frame
+
+ def process_image(model_name, image, confidence_threshold, iou_threshold):
+     model_path = model_options[model_name]
+     model = YOLO(model_path).to('cpu')
+
+     frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+     with torch.inference_mode():
+         results = model(frame, conf=confidence_threshold, iou=iou_threshold)
+     annotated_frame = annotate_frame(frame, results)
+     annotated_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
+     return annotated_frame, "N/A"
+
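+ # Generator: yields (image, video, status) tuples so Gradio can stream progress
+ # updates to the UI while a video is still being processed.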
+ def run_inference(mode, model_name, image, video, confidence_threshold, iou_threshold):
+     global stop_processing
+     stop_processing = False  # Reset stop flag at the start
+
+     if mode == "Image":
+         if image is None:
+             yield None, None, "Please upload an image."
+             return
+         annotated_img, fps = process_image(model_name, image, confidence_threshold, iou_threshold)
+         yield annotated_img, None, fps
+     else:
+         if video is None:
+             yield None, None, "Please upload a video."
+             return
+
+         model_path = model_options[model_name]
+         model = YOLO(model_path).to('cpu')
+         cap = cv2.VideoCapture(video)
+
+         frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+         if frame_count <= 0:
+             frame_count = 1
+         # Use the source frame rate for the output video, falling back to 30
+         source_fps = cap.get(cv2.CAP_PROP_FPS) or 30
+
+         output_frames = []
+         fps_list = []
+         processed_count = 0
+
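+         # NOTE: every annotated frame is buffered in memory before writing, so
+         # very long videos can exhaust RAM; a streaming VideoWriter would avoid that.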
+         while not stop_processing:
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             start_time = time.time()
+             with torch.inference_mode():
+                 results = model(frame, conf=confidence_threshold, iou=iou_threshold)
+
+             annotated_frame = annotate_frame(frame, results)
+             output_frames.append(annotated_frame)
+
+             fps_val = 1 / (time.time() - start_time)
+             fps_list.append(fps_val)
+
+             processed_count += 1
+             progress_fraction = processed_count / frame_count
+
+             # Yield progress every few frames
+             if processed_count % 5 == 0:
+                 yield None, None, f"Processing... {progress_fraction * 100:.2f}%"
+
+         # Release the capture before bailing out so a canceled run does not leak it
+         cap.release()
+
+         if stop_processing:
+             yield None, None, "Processing canceled."
+             return
+
+         if len(output_frames) > 0 and not stop_processing:
+             avg_fps = sum(fps_list) / len(fps_list) if fps_list else 0
+             height, width, _ = output_frames[0].shape
+             output_video_path = "output.mp4"
+             out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'mp4v'), source_fps, (width, height))
+             for frame in output_frames:
+                 out.write(frame)
+             out.release()
+
+             yield None, output_video_path, f"Average FPS: {avg_fps:.2f}"
+         elif not stop_processing:
+             yield None, None, "No frames processed."
+
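+ # Flips the shared flag polled by the video loop; the cancel button is wired with
+ # queue=False below so the request is not stuck behind the running inference job.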
+ def cancel_processing():
+     global stop_processing
+     stop_processing = True
+     return "Cancel signal sent."
+
+ def start_app():
+     model_names = list(model_options.keys())
+
+     with gr.Blocks() as app:
+         # Instructional message shown at the top of the app
+         gr.Markdown("""
+ ### Welcome to the YOLO Inference App!
+
+ **How to Use:**
+ 1. **Select Mode:** choose between **Image** and **Video** processing.
+ 2. **Select Model:** pick a pre-trained YOLO model from the dropdown menu.
+ 3. **Upload Your File:**
+    - For **Image** mode, upload an image (e.g., `pothole.jpg`).
+    - For **Video** mode, upload a video (e.g., `potholeall.mp4` or `electric bus fire.mp4`).
+ 4. **Adjust Thresholds:**
+    - **Confidence Threshold:** the minimum confidence a detection must reach to be kept.
+    - **IoU Threshold:** the Intersection over Union cutoff used for non-maximum suppression.
+ 5. **Start Processing:** click **Start Processing** to begin inference; you can cancel
+    at any time with **Cancel Processing**.
+
+ **Example Files:**
+ - **Image:** `pothole.jpg`
+ - **Videos:** `potholeall.mp4`, `electric bus fire.mp4`
+ """)
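+         # (The two thresholds described above map directly onto the conf and iou
+         # arguments of the Ultralytics model call in run_inference and process_image.)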
+
+         gr.Markdown("## YOLO Inference (Image or Video) with Progress & Cancel")
+
+         with gr.Row():
+             mode = gr.Radio(["Image", "Video"], value="Image", label="Mode")
+             model_selector = gr.Dropdown(choices=model_names, label="Select Model", value=model_names[0])
+
+         image_input = gr.Image(label="Upload Image", visible=True)
+         video_input = gr.Video(label="Upload Video", visible=False)
+
+         confidence_slider = gr.Slider(0.1, 1.0, value=0.3, step=0.1, label="Confidence Threshold")
+         # Range starts at 0.001 so the 0.001 default and the example values are valid
+         iou_slider = gr.Slider(0.001, 1.0, value=0.001, step=0.001, label="IoU Threshold")
+
+         annotated_image_output = gr.Image(label="Annotated Image", visible=True)
+         annotated_video_output = gr.Video(label="Output Video", visible=False)
+         fps_output = gr.Textbox(label="Status / Average FPS", interactive=False)
+
+         start_button = gr.Button("Start Processing")
+         cancel_button = gr.Button("Cancel Processing", variant="stop")
+
+         # Example files live under examples/
+         examples = gr.Examples(
+             examples=[
+                 ["examples/pothole.jpg", None, 0.3, 0.001],  # image example
+                 [None, "examples/potholeall.mp4", 0.3, 0.001],  # video example
+                 [None, "examples/electric bus fire.mp4", 0.5, 0.001]  # video example with a higher confidence threshold
+             ],
+             inputs=[image_input, video_input, confidence_slider, iou_slider]
+         )
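+         # Example paths are resolved relative to the app's working directory, so
+         # these files are expected to sit next to app.py in an examples/ folder.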
+
+         def update_visibility(selected_mode):
+             if selected_mode == "Image":
+                 return (
+                     gr.update(visible=True),
+                     gr.update(visible=False),
+                     gr.update(visible=True),
+                     gr.update(visible=False)
+                 )
+             else:
+                 return (
+                     gr.update(visible=False),
+                     gr.update(visible=True),
+                     gr.update(visible=False),
+                     gr.update(visible=True)
+                 )
+
+         mode.change(
+             update_visibility,
+             inputs=mode,
+             outputs=[image_input, video_input, annotated_image_output, annotated_video_output]
+         )
+
+         start_button.click(
+             fn=run_inference,
+             inputs=[mode, model_selector, image_input, video_input, confidence_slider, iou_slider],
+             outputs=[annotated_image_output, annotated_video_output, fps_output],
+             queue=True
+         )
+
+         cancel_button.click(
+             fn=cancel_processing,
+             inputs=[],
+             outputs=[fps_output],
+             queue=False
+         )
+
+     return app
+
+ if __name__ == "__main__":
+     app = start_app()
+     app.launch()
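+
+ # Assumed repository layout (illustrative, inferred from the paths above):
+ #   app.py
+ #   models/<model_name>/weights/<weights>.pt
+ #   examples/pothole.jpg, examples/potholeall.mp4, examples/electric bus fire.mp4
+ # The annotated video is written to output.mp4 in the working directory.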