import torch
import gradio as gr
import cv2
import numpy as np
import tempfile
from datetime import datetime
import shutil
import os  # Used to remove the temporary video file after processing
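# Dependency note (an assumption, not taken from this repo's requirements.txt):
# the Space is expected to provide torch, gradio, opencv-python and numpy;
# the YOLOv5 hub model additionally pulls in its own requirements (e.g. pandas,
# PyYAML, requests) when torch.hub.load() clones the ultralytics/yolov5 repo.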
# Load the custom-trained YOLOv5 model (best.pt must be present in the repo)
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
def detect_video(video):
    # Gradio may pass the video as raw bytes or as a file path on disk
    if isinstance(video, bytes):
        with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
            tmpfile.write(video)       # Write the bytes to a temporary file
            video_path = tmpfile.name  # Remember the temporary file path
        is_temp = True
    else:
        video_path = video             # Already a file path
        is_temp = False

    cap = cv2.VideoCapture(video_path)  # Open the video for frame-by-frame reading

    # Collected detections across all frames
    detection_results = []

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Wall-clock timestamp for this frame (not the position within the video)
        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # Run detection; OpenCV yields BGR frames, while the YOLOv5 hub model expects RGB
        results = model(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

        # Extract bounding boxes and confidence scores (xyxy format: corner coordinates)
        for *xyxy, conf, cls in results.xyxy[0]:
            x1, y1, x2, y2 = map(int, xyxy)  # Convert tensor values to plain integers
            detection_results.append({
                'timestamp': timestamp,
                'coordinates': {'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2},
                'confidence': float(conf)
            })

    cap.release()

    # Clean up the temporary file only if we created it ourselves
    if is_temp:
        os.remove(video_path)

    return detection_results
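# For reference, each entry in the returned list has this shape
# (values below are illustrative, not real model output):
# {
#     'timestamp': '2024-01-01 12:00:00',
#     'coordinates': {'x1': 34, 'y1': 50, 'x2': 210, 'y2': 318},
#     'confidence': 0.87
# }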
# Gradio interface: upload a video and receive the detections as JSON
interface = gr.Interface(fn=detect_video,
                         inputs=gr.Video(),
                         outputs="json",
                         live=True,  # Re-run detection automatically when the input changes
                         title="YOLOv5 Video Object Detection",
                         description="Upload a video to detect objects and get bounding boxes with timestamps.")

# Launch the interface with share=True to get a public link
interface.launch(share=True)
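# For a quick local sanity check without the web UI, the launch() call above
# could be commented out and the function invoked directly; the path below is
# a placeholder, not a file shipped with this Space:
# print(detect_video('sample.mp4'))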