Bhaskar Saranga committed
Commit · bcc8153
1 Parent(s): e215925
Added tracking in separate tab
app.py
CHANGED
@@ -179,8 +179,9 @@ def inference_comp(image,iou_threshold,confidence_threshold):
     v7_out, v7_fps = inference(image, "yolov7",iou_threshold,confidence_threshold)
     return v7_out,v8_out,v7_fps,v8_fps
 
-def MODT(sourceVideo,
-    model_path = 'weights/'+str(model_link)+'.pt'
+def MODT(sourceVideo, trackingmethod):
+    #model_path = 'weights/'+str(model_link)+'.pt'
+    model_path = 'weights/yolov8m.pt'
     return MOT(model_path, trackingmethod, sourceVideo), 30
 
 examples_images = ['data/images/1.jpg',
@@ -212,7 +213,6 @@ with gr.Blocks() as demo:
             video_output = gr.Video(type="pil", label="Output Video",format="mp4")
             fps_video = gr.Number(0,label='FPS')
         video_drop = gr.Dropdown(label="Model", choices=models,value=models[0])
-        tracking_drop = gr.Dropdown(label="Tracker", choices=trackers,value=trackers[0])
         video_iou_threshold = gr.Slider(label="IOU Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.45)
         video_conf_threshold = gr.Slider(label="Confidence Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.25)
         gr.Examples(examples=examples_videos,inputs=video_input,outputs=video_output)
@@ -235,6 +235,18 @@ with gr.Blocks() as demo:
             v7_fps_image = gr.Number(0,label='v7 FPS')
             v8_fps_image = gr.Number(0,label='v8 FPS')
         gr.Examples(examples=examples_images,inputs=image_comp_input,outputs=[image_comp_output_v7,image_comp_output_v8])
+
+    with gr.Tab("Video Tacking"):
+        gr.Markdown("## MOT using YoloV8 detection with tracking")
+        with gr.Row():
+            videotr_input = gr.Video(type='pil', label="Input Video", source="upload")
+            videotr_output = gr.Video(type="pil", label="Output Video",format="mp4")
+            fpstr_video = gr.Number(0,label='FPS')
+        tracking_drop = gr.Dropdown(choices=trackers,value=trackers[0], label="Select the tracking method")
+        videotr_iou_threshold = gr.Slider(label="IOU Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.45)
+        videotr_conf_threshold = gr.Slider(label="Confidence Threshold",interactive=True, minimum=0.0, maximum=1.0, value=0.25)
+        gr.Examples(examples=examples_videos,inputs=video_input,outputs=video_output)
+        video_button_track = gr.Button("Track")
 
 
     text_button.click(inference, inputs=[image_input,image_drop,
@@ -247,7 +259,7 @@ with gr.Blocks() as demo:
                                          image_comp_iou_threshold,
                                          image_comp_conf_threshold],
                       outputs=[image_comp_output_v7,image_comp_output_v8,v7_fps_image,v8_fps_image])
-    video_button_track.click(MODT,inputs=[
-                             outputs=[
+    video_button_track.click(MODT,inputs=[videotr_input, tracking_drop],
+                             outputs=[videotr_output, fpstr_video])
 
 demo.launch(debug=True,enable_queue=True)
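The wiring this commit adds follows the standard Gradio Blocks pattern: declare components inside a `with gr.Tab(...)` context, then bind a `Button.click` callback whose inputs and outputs line up with the handler's parameters and return values. Below is a minimal, self-contained sketch of that pattern, assuming a Gradio 3.x-style API; `track_video` and the `TRACKERS` list are illustrative stand-ins for this Space's `MODT` and `trackers`, not code from the repo.

import gradio as gr

# Assumed tracker names for illustration; the Space defines its own `trackers` list.
TRACKERS = ["strongsort", "bytetrack", "ocsort"]

def track_video(video_path, tracking_method):
    # Stand-in for MODT: run detection + tracking, return (output video, fps).
    print(f"tracking {video_path} with {tracking_method}")
    return video_path, 30  # MODT likewise returns a hardcoded 30 fps

with gr.Blocks() as demo:
    with gr.Tab("Video Tracking"):
        with gr.Row():
            video_in = gr.Video(label="Input Video")
            video_out = gr.Video(label="Output Video")
            fps_out = gr.Number(0, label="FPS")
        tracker_drop = gr.Dropdown(choices=TRACKERS, value=TRACKERS[0],
                                   label="Select the tracking method")
        track_btn = gr.Button("Track")
    # Outputs map positionally onto the callback's return tuple: (video, fps).
    track_btn.click(track_video, inputs=[video_in, tracker_drop],
                    outputs=[video_out, fps_out])

demo.launch()

Note that in the committed diff the new tab's IOU and confidence sliders are declared but never passed to `MODT`, and its `gr.Examples` still points at the detection tab's `video_input`/`video_output`; only the uploaded video and the tracker dropdown reach the handler.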
track.py
CHANGED
@@ -29,22 +29,18 @@ if str(ROOT / 'trackers' / 'strongsort') not in sys.path:
     ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
 
 import logging
-
+
 from ultralytics.nn.autobackend import AutoBackend
-#from yolov8.ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages, LoadStreams
 from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages, LoadStreams
-#from yolov8.ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS
 from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS
-#from yolov8.ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, SETTINGS, callbacks, colorstr, ops
 from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, SETTINGS, callbacks, colorstr, ops
-
-#from yolov8.ultralytics.yolo.utils.checks import check_file, check_imgsz, check_imshow, print_args, check_requirements
 from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_imshow, print_args, check_requirements
 from ultralytics.yolo.utils.files import increment_path
 from ultralytics.yolo.utils.torch_utils import select_device
 from ultralytics.yolo.utils.ops import Profile, non_max_suppression, scale_boxes, process_mask, process_mask_native
 from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
 
+
 from trackers.multi_tracker_zoo import create_tracker
 
 
@@ -85,6 +81,8 @@ def run(
         vid_stride=1, # video frame-rate stride
         retina_masks=False,
 ):
+    #print the inputs
+    print(f"model used : {yolo_weights}, tracking method : {tracking_method}")
 
     source = str(source)
     save_img = not nosave and not source.endswith('.txt') # save inference images
@@ -108,6 +106,8 @@ def run(
     # Load model
     device = select_device(device)
     is_seg = '-seg' in str(yolo_weights)
+
+
     model = AutoBackend(yolo_weights, device=device, dnn=dnn, fp16=half)
     stride, names, pt = model.stride, model.names, model.pt
     imgsz = check_imgsz(imgsz, stride=stride) # check image size
@@ -390,8 +390,10 @@ def MOT(yoloweights, trackingmethod, sourceVideo):
     save_dir = increment_path('exp', exist_ok=True)
     input = os.path.join(save_dir,'out.mp4')
     outpath = 'output.mp4' #'output/'+ 'output.mp4'
+    if os.path.exists(outpath):
+        os.remove(outpath)
+
     command = f"ffmpeg -i {input} -vf fps=30 -vcodec libx264 {outpath}"
     print(command)
     os.system(command)
-    #!ffmpeg -i $input -vf fps=30 -vcodec libx264 $outpath tbd
     return outpath
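The delete-before-encode guard added to `MOT` matters because `ffmpeg` prompts for confirmation ("File 'output.mp4' already exists. Overwrite? [y/N]") when its output file exists, and a non-interactive `os.system` call in a Space has no stdin to answer it. Below is a sketch of the same re-encode step, an assumed refactor rather than the committed code, using `subprocess.run` with a list argv plus ffmpeg's `-y` overwrite flag as a second safeguard:

import os
import subprocess

def reencode(src: str, outpath: str = "output.mp4") -> str:
    # Same guard the commit adds: remove any stale output before encoding.
    if os.path.exists(outpath):
        os.remove(outpath)
    # -vf fps=30 resamples to 30 fps; libx264 yields browser-playable H.264.
    # -y overwrites without prompting, so the call cannot hang waiting for input.
    subprocess.run(
        ["ffmpeg", "-y", "-i", src, "-vf", "fps=30", "-vcodec", "libx264", outpath],
        check=True,
    )
    return outpath

`check=True` also surfaces encode failures as exceptions, instead of the exit status that `os.system` returns and the committed code ignores.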