Update app.py
app.py CHANGED
@@ -1,68 +1,54 @@
+import os
 import gradio as gr
 import cv2
-import requests
-import os
 from ultralytics import YOLO
 
-# Define
-…
-]
+# Define the folder containing the images and video
+folder_path = "info"  # Replace with your folder name or path
+
+# Get list of files from the folder
+image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
+video_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.mp4', '.avi', '.mov'))]
 
-#
-…
-    file = requests.get(url)
-    open(save_name, 'wb').write(file.content)
+# Ensure the folder contains the expected number of files
+if len(image_files) < 2 or len(video_files) < 1:
+    raise ValueError("Folder must contain at least 2 images and 1 video.")
 
-#
-…
-        download_file(url, f"video.mp4")
-    else:
-        download_file(url, f"image_{i}.jpg")
+# Select the first two images and the first video
+image_examples = [[image_files[0]], [image_files[1]]]
+video_examples = [[video_files[0]]]
 
 # Load the YOLO model
 model = YOLO('best.pt')
 
-# Define example paths for Gradio
-image_examples = [["image_0.jpg"], ["image_1.jpg"]]
-video_examples = [["video.mp4"]]
-
 # Function for processing images
 def show_preds_image(image_path):
     image = cv2.imread(image_path)
     results = model.predict(source=image_path)
-    annotated_image = results[0].plot()
+    annotated_image = results[0].plot()
     return cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
 
 # Function for processing videos
 def show_preds_video(video_path):
     cap = cv2.VideoCapture(video_path)
-    out_frames = []
-
+    out_frames = []
+    fps = int(cap.get(cv2.CAP_PROP_FPS))
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
-
         results = model.predict(source=frame)
         annotated_frame = results[0].plot()
         out_frames.append(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
-
     cap.release()
-
+
     # Save the annotated video
     output_path = "annotated_video.mp4"
     height, width, _ = out_frames[0].shape
     fourcc = cv2.VideoWriter_fourcc(*"mp4v")
-    writer = cv2.VideoWriter(output_path, fourcc,
-…
+    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for frame in out_frames:
         writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
-
     writer.release()
     return output_path
 
@@ -77,7 +63,7 @@ interface_image = gr.Interface(
     examples=image_examples,
 )
 
-inputs_video = gr.Video(label="Input Video")
+inputs_video = gr.Video(label="Input Video")
 outputs_video = gr.Video(label="Annotated Output")
 interface_video = gr.Interface(
     fn=show_preds_video,
@@ -91,4 +77,4 @@ interface_video = gr.Interface(
 gr.TabbedInterface(
     [interface_image, interface_video],
     tab_names=['Image Inference', 'Video Inference']
-).launch(share=True)
+).launch(share=True)
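Note that the removed block is only partially captured in this view (the lines marked `…` above were not recovered). From the surviving fragments (the `requests` import, `file = requests.get(url)`, `open(save_name, 'wb').write(file.content)`, and the `download_file(url, ...)` call sites), the deleted code evidently downloaded the example assets at startup. A minimal sketch of what it likely looked like follows; the `urls` list, the loop, and the `.mp4` check are assumptions, not recovered from the page:

import requests

# Assumed reconstruction: the original URL list was not captured in this view.
urls = [
    # ... original asset URLs ...
]

# Fetch a file over HTTP and write it to disk (body recovered from the diff).
def download_file(url, save_name):
    file = requests.get(url)
    open(save_name, 'wb').write(file.content)

# Assumed loop: fetch the example video and images named as the call sites show.
for i, url in enumerate(urls):
    if url.endswith(".mp4"):  # assumed condition
        download_file(url, f"video.mp4")
    else:
        download_file(url, f"image_{i}.jpg")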
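One caveat with the new show_preds_video: if the capture yields no frames (bad path or unsupported codec), out_frames[0] raises an IndexError, and cv2.CAP_PROP_FPS can report 0 for some containers, producing an unplayable output. A small guard, sketched here as a suggestion rather than as part of the commit (names match the new app.py):

# Suggested defensive checks before constructing the VideoWriter
# (not part of this commit).
if not out_frames:
    raise ValueError(f"No frames could be read from {video_path}.")
fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30  # fall back to 30 FPS if the container reports 0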