import cv2
from ultralytics import YOLO
import numpy as np
import os
import gradio as gr


def fonk(video_path):
    """Annotate a video with YOLOv8 cattle detections and return the output path.

    Reads `video_path` frame by frame, draws a green rectangle around every
    detected box, and writes the annotated frames to "filename.mp4".

    Parameters
    ----------
    video_path : str
        Path to the input video (supplied by the Gradio `Video` component).

    Returns
    -------
    str
        Path of the annotated output video, for the Gradio `Video` output.
    """
    # NOTE(review): hard-coded local weights path — consider making this
    # relative to the app directory so the Space is portable.
    model = YOLO("/home/kahraman/Masaüstü/HuggingFace_Models_and_Spaces/Cattle_Detection_with_YOLOV8/best.pt")
    cap = cv2.VideoCapture(video_path)

    # Use named CAP_PROP_* constants instead of magic indices 3/4.
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # BUG FIX: preserve the source FPS (original hard-coded 10, which changed
    # playback speed). Fall back to 10 when the container reports no FPS.
    fps = cap.get(cv2.CAP_PROP_FPS) or 10

    output_path = "filename.mp4"
    # BUG FIX: "DIVX" fourcc does not match an .mp4 container; use "mp4v".
    writer = cv2.VideoWriter(
        output_path,
        cv2.VideoWriter_fourcc(*"mp4v"),
        fps,
        (frame_width, frame_height),
    )

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            results = model(frame)
            for result in results:
                # BUG FIX: iterate over ALL detected boxes. The original took
                # result.boxes.xyxy[0], drawing only the first detection and
                # raising IndexError on frames with no detections.
                for box in result.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            writer.write(frame)
    finally:
        # BUG FIX: release both handles so the output file is flushed and
        # finalized (the original leaked them, risking a corrupt video).
        cap.release()
        writer.release()

    # BUG FIX: return the output file path. The original returned the result
    # of writer.write(frame) (None), so Gradio had nothing to display.
    return output_path


demo = gr.Interface(
    fonk,
    inputs=gr.Video(),
    outputs=gr.Video(),
    examples=["/home/kahraman/Masaüstü/HuggingFace_Models_and_Spaces/Cattle_Detection_with_YOLOV8/cow-video-cows-mooing-and-grazing-in-a-field.mp4"],
    title="cows",
    cache_examples=True,
)

# Guard the launch so importing this module (e.g. for testing) does not
# start the server as a side effect.
if __name__ == "__main__":
    demo.launch()