Upload 3 files
- app.py +76 -0
- best.pt +3 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,76 @@
import gradio as gr
import torch
import numpy as np
import cv2
import os
import tempfile
from pathlib import Path
from ultralytics import YOLO

# Load the YOLO model
model_path = Path(__file__).parent / "best.pt"
model = YOLO(model_path)

def process_video(video_path):
    """
    Process a video with the YOLO model and return the processed video path.
    """
    if not video_path:
        return None

    # Create temporary file for output
    temp_output_path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name

    # Process video with YOLO
    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    # Define codec and create VideoWriter object
    output = cv2.VideoWriter(
        temp_output_path,
        cv2.VideoWriter_fourcc(*'mp4v'),
        fps,
        (width, height)
    )

    # Process each frame
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Run YOLO inference on the frame
        results = model(frame)

        # Visualize the results on the frame
        annotated_frame = results[0].plot()

        # Write the frame to the output video
        output.write(annotated_frame)

    # Release resources
    cap.release()
    output.release()

    return temp_output_path

# Create the Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# Vehicle Detection with YOLOv12")
    gr.Markdown("Upload a video and click 'Submit' to detect vehicles using a fine-tuned YOLOv12 model.")

    with gr.Row():
        input_video = gr.Video(label="Upload Video")
        output_video = gr.Video(label="Processed Video")

    submit_btn = gr.Button("Submit")
    submit_btn.click(
        fn=process_video,
        inputs=[input_video],
        outputs=[output_video]
    )

if __name__ == "__main__":
    app.launch()
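For a quick local sanity check of process_video without launching the Gradio UI, a minimal sketch is shown below; the clip name sample.mp4 is a placeholder, not part of the repo. Importing app builds the Blocks interface but does not start a server, since launch() is guarded by the __main__ check.

# Smoke-test process_video directly, without starting the Gradio server.
# "sample.mp4" is a placeholder; substitute any short video file.
from app import process_video

result = process_video("sample.mp4")
print("Annotated video written to:", result)
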
best.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6f009561338788061ccc2da8817872cc324e5a044a1d2e7a9dc43d96e844fac
size 5544083
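best.pt is stored as a Git LFS pointer: the file above records the object's SHA-256 and byte size rather than the weights themselves. A small sanity check, assuming the real weights have been pulled via Git LFS, is to compare the local file against those recorded values:

# Verify that the fetched best.pt matches the LFS pointer above.
import hashlib
from pathlib import Path

weights = Path("best.pt")
assert weights.stat().st_size == 5544083, "unexpected file size"
digest = hashlib.sha256(weights.read_bytes()).hexdigest()
assert digest == "f6f009561338788061ccc2da8817872cc324e5a044a1d2e7a9dc43d96e844fac", "unexpected hash"
print("best.pt matches the LFS pointer")
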
requirements.txt
ADDED
@@ -0,0 +1,5 @@
gradio>=4.0.0
torch>=2.0.0
opencv-python>=4.5.0
numpy>=1.22.0
ultralytics>=8.0.0
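Before launching the Space locally, a sketch using only the standard library can print the installed version of each pinned package next to its minimum, for a quick manual comparison:

# Print installed versions next to the minimums pinned in requirements.txt.
from importlib.metadata import version

pins = {
    "gradio": "4.0.0",
    "torch": "2.0.0",
    "opencv-python": "4.5.0",
    "numpy": "1.22.0",
    "ultralytics": "8.0.0",
}
for pkg, minimum in pins.items():
    print(f"{pkg}: installed {version(pkg)}, requires >= {minimum}")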