import torch
import gradio as gr
import cv2
import numpy as np
from datetime import datetime
# Load YOLOv5 model
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
def detect_video(video):
    cap = cv2.VideoCapture(video)  # Gradio passes the uploaded video as a file path

    # List to hold results
    detection_results = []

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Wall-clock time at which this frame is processed (not the frame's position in the video)
        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # Perform detection on the current frame
        results = model(frame)

        # Extract bounding boxes and confidence scores:
        # results.xyxy[0] holds one row per detection: x1, y1, x2, y2, confidence, class
        for *xyxy, conf, cls in results.xyxy[0]:
            x1, y1, x2, y2 = map(int, xyxy)  # Convert tensor values to integers
            detection_results.append({
                'timestamp': timestamp,
                'coordinates': {'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2},
                'confidence': float(conf)
            })

    cap.release()
    return detection_results
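
# Hypothetical quick check (not part of the Space): detect_video can be called directly
# with the path to a local clip and returns a list of per-detection dicts. The file name
# 'sample.mp4' below is an assumption; point it at a clip you actually have.
# Uncomment to try it without launching the Gradio UI:
#
#     detections = detect_video('sample.mp4')
#     print(detections[:3])  # first few detections: timestamp, box coordinates, confidence
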
# Gradio Interface
interface = gr.Interface(fn=detect_video,
                         inputs=gr.Video(),  # uploaded video is handed to detect_video as a file path
                         outputs="json",
                         live=True,
                         title="YOLOv5 Video Object Detection",
                         description="Upload a video to detect objects and get bounding boxes with timestamps.")
interface.launch()