Btr4k committed on
Commit
fec6b6f
·
1 Parent(s): e950521

Initial commit: YOLO detection app

Browse files
Files changed (5) hide show
  1. .gitignore +4 -0
  2. README.md +13 -11
  3. app.py +192 -0
  4. logo-h.png +0 -0
  5. requirements.txt +6 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Python virtual environment and bytecode cache files
2
+ venv/
3
+ __pycache__/
4
+ *.pyc
README.md CHANGED
@@ -1,12 +1,14 @@
1
- ---
2
- title: Yolov8 Object Detection
3
- emoji: πŸš€
4
- colorFrom: red
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 5.16.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # YOLO Object Detection App
 
 
 
 
 
 
 
 
 
2
 
3
+ This application uses YOLOv8 to perform object detection on images and videos.
4
+
5
+ ## Features
6
+ - Support for both image and video uploads
7
+ - Real-time object detection
8
+ - Progress tracking for video processing
9
+ - Easy-to-use interface
10
+
11
+ ## Usage
12
+ 1. Upload an image or video file
13
+ 2. Wait for processing
14
+ 3. View detection results
app.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from ultralytics import YOLO
3
+ from PIL import Image
4
+ import cv2
5
+ import numpy as np
6
+ import tempfile
7
+ import os
8
+
9
+ # Load the YOLOv8 model
10
+ model = YOLO('yolov8n.pt')
11
+
def process_image(image):
    """Run YOLO detection on a single image.

    Args:
        image: Any input accepted by the YOLO model (RGB numpy array,
            PIL image, or file path).

    Returns:
        tuple: (annotated PIL.Image, newline-joined "class: confidence"
            lines; the string is empty when nothing is detected).
    """
    results = model(image)
    # Collect one "name: confidence" line per detected box.
    detection_info = []
    for box in results[0].boxes:
        class_id = int(box.cls[0])
        class_name = results[0].names[class_id]
        confidence = float(box.conf[0])
        detection_info.append(f"{class_name}: {confidence:.2%}")

    # Results.plot() returns a BGR array (OpenCV convention); convert to RGB
    # so PIL/Gradio display the correct colors instead of swapped channels.
    annotated = cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)
    return Image.fromarray(annotated), "\n".join(detection_info)
def process_video(video_path):
    """Run YOLO detection on every frame of a video.

    Writes an annotated copy of the video to a temporary .mp4 file and
    builds a per-class tally of detected objects, sampled roughly once per
    second so the summary stays small.

    Args:
        video_path: Path to the input video file.

    Returns:
        tuple: (path to the annotated output video, human-readable summary
            string of detected object counts).
    """
    # Reserve a temp file path for the annotated output; closed immediately
    # so cv2.VideoWriter can reopen it. delete=False keeps it for Gradio.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
        output_path = temp_file.name

    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 fps; fall back to a sane default so the
    # writer works and the sampling modulus below never divides by zero.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    detection_summary = []
    frame_count = 0

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            frame_count += 1
            results = model(frame)

            # Sample detections roughly once per second of video.
            if frame_count % fps == 0:
                for box in results[0].boxes:
                    class_id = int(box.cls[0])
                    detection_summary.append(results[0].names[class_id])

            # plot() yields a BGR frame, which is what VideoWriter expects.
            out.write(results[0].plot())
    finally:
        # Always release handles, even if inference fails mid-video.
        cap.release()
        out.release()

    # Summarize how often each class appeared among the sampled frames.
    if detection_summary:
        from collections import Counter
        counts = Counter(detection_summary)
        summary = "\n".join(
            f"{obj}: {count} occurrences" for obj, count in counts.most_common()
        )
    else:
        summary = "No objects detected"

    return output_path, summary
def detect_objects(media):
    """Dispatch an uploaded file to image or video processing.

    Args:
        media: Upload from gr.File — a filepath string, a file-like object
            exposing .name (older Gradio versions), or None.

    Returns:
        6-tuple matching the Gradio outputs:
        (image, video, detection text, status message,
         image-column visibility update, video-column visibility update).
    """
    if media is None:
        return (None, None, None,
                "Please upload an image or video to begin detection.",
                gr.update(visible=True), gr.update(visible=False))

    # Normalize to a filesystem path: depending on the Gradio version,
    # gr.File hands back either a plain path string or a tempfile wrapper
    # whose .name attribute holds the path.
    path = media if isinstance(media, str) else getattr(media, "name", media)

    try:
        if isinstance(path, str) and path.lower().endswith(('.mp4', '.avi', '.mov')):
            output_video, detection_summary = process_video(path)
            return (None, output_video, detection_summary,
                    "βœ… Video processing complete! Check the detection summary below.",
                    gr.update(visible=False), gr.update(visible=True))
        else:
            if isinstance(path, str):
                image = cv2.imread(path)
                # imread returns None (no exception) for unreadable files;
                # raise a clear error instead of a cryptic cvtColor failure.
                if image is None:
                    raise ValueError(f"Could not read image file: {path}")
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            else:
                image = path
            processed_image, detection_info = process_image(image)
            return (processed_image, None, detection_info,
                    "βœ… Image processing complete! Check the detections below.",
                    gr.update(visible=True), gr.update(visible=False))
    except Exception as e:
        # Surface the failure in the status box rather than crashing the UI.
        return (None, None, None, f"❌ Error: {str(e)}",
                gr.update(visible=False), gr.update(visible=False))
# Custom CSS for styling.
# Injected into gr.Blocks via the css= argument below; selectors target the
# elem_id / elem_classes values set on the layout components.
custom_css = """
#app-container {
    max-width: 1200px;
    margin: 0 auto;
    padding: 20px;
}

#logo-img {
    display: block;
    margin: 0 auto;
    max-height: 100px;
    margin-bottom: 20px;
}

.upload-box {
    border: 2px dashed #ccc;
    padding: 20px;
    text-align: center;
    border-radius: 8px;
    background-color: #f8f9fa;
    margin: 20px 0;
}

.results-container {
    background-color: #ffffff;
    border-radius: 8px;
    padding: 15px;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    margin-top: 20px;
}

.detection-info {
    background-color: #f8f9fa;
    padding: 15px;
    border-radius: 8px;
    margin-top: 10px;
    font-family: monospace;
}
"""
# Create Gradio interface.
# Layout: logo header, upload box, status + results textboxes, and two
# mutually exclusive result columns (image vs. video) whose visibility is
# toggled by the gr.update() values returned from detect_objects.
with gr.Blocks(css=custom_css) as demo:
    with gr.Column(elem_id="app-container"):
        # Logo and Header
        # NOTE(review): a bare relative src may not resolve inside Gradio's
        # served HTML — confirm the logo actually renders when deployed.
        gr.HTML(
            """
            <div style="text-align: center; margin-bottom: 1rem">
                <img src="logo-h.png" id="logo-img" alt="Logo">
            </div>
            """
        )

        with gr.Column():
            gr.Markdown("# πŸ” Object Detection")

        # Upload Section
        with gr.Column(elem_classes="upload-box"):
            gr.Markdown("### πŸ“€ Upload your file")
            input_media = gr.File(
                label="Drag and drop or click to upload (Images: jpg, jpeg, png | Videos: mp4, avi, mov)",
                file_types=["image", "video"]
            )

        # Status Message (read-only; written by detect_objects)
        status_text = gr.Textbox(
            label="Status",
            value="Waiting for upload...",
            interactive=False
        )

        # Detection Information (read-only; per-detection or summary text)
        detection_info = gr.Textbox(
            label="Detection Results",
            elem_classes="detection-info",
            interactive=False
        )

        # Results Section: both columns start hidden; detect_objects shows
        # exactly one of them depending on the media type.
        with gr.Column(elem_classes="results-container"):
            with gr.Row():
                with gr.Column(visible=False) as image_column:
                    output_image = gr.Image(label="Detected Objects")
                with gr.Column(visible=False) as video_column:
                    output_video = gr.Video(label="Processed Video")

        # Handle file upload: the six outputs match detect_objects' 6-tuple.
        input_media.upload(
            fn=detect_objects,
            inputs=[input_media],
            outputs=[output_image, output_video, detection_info, status_text,
                     image_column, video_column]
        )

if __name__ == "__main__":
    # share=True requests a public tunnel link in addition to localhost.
    demo.launch(share=True)
logo-h.png ADDED
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ ultralytics>=8.0.0
3
+ torch>=2.0.0
4
+ Pillow>=9.0.0
5
+ opencv-python>=4.8.0
6
+ numpy>=1.24.0