kdevoe committed
Commit b67e16c · verified · 1 Parent(s): ea37c20

Changing to use OpenCV for real-time detection

Files changed (1)
  1. app.py +35 -27
app.py CHANGED
@@ -1,47 +1,54 @@
-import gradio as gr
 import cv2
+import gradio as gr
 import numpy as np
 from transformers import pipeline
 
-# Load the YOLO model using Hugging Face's pipeline
+# Load YOLO model from Hugging Face's transformers library
 model = pipeline("object-detection", model="hustvl/yolos-tiny")
 
-# Function to run YOLO on each video frame
-def detect_objects(frame):
-    # Convert frame to RGB as required by the model
-    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+# Function to capture and process video frames in real time
+def capture_and_detect():
+    cap = cv2.VideoCapture(0)  # OpenCV video capture from webcam
+
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
 
-    # Run object detection
-    results = model(rgb_frame)
+        # Convert frame to RGB as required by YOLO model
+        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
-    # Draw bounding boxes and labels
-    for result in results:
-        # Extract details
-        label = result['label']
-        score = result['score']
-        box = result['box']
-        x1, y1, x2, y2 = int(box['xmin']), int(box['ymin']), int(box['xmax']), int(box['ymax'])
+        # Perform object detection on the frame
+        results = model(rgb_frame)
 
-        # Draw rectangle and label on the frame
-        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
-        text = f"{label}: {score:.2f}"
-        cv2.putText(frame, text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+        # Draw bounding boxes and labels on the frame
+        for result in results:
+            label = result['label']
+            score = result['score']
+            box = result['box']
+            x1, y1, x2, y2 = int(box['xmin']), int(box['ymin']), int(box['xmax']), int(box['ymax'])
 
-    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Convert back to RGB for Gradio
+            # Draw bounding box and label
+            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+            text = f"{label}: {score:.2f}"
+            cv2.putText(frame, text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
 
-# Gradio interface to capture video frames
-def video_stream(frame):
-    # Run object detection on the frame
-    annotated_frame = detect_objects(frame)
-    return annotated_frame
+        # Convert BGR back to RGB for Gradio display
+        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+    cap.release()
+
+# Gradio Interface using real-time video capture and object detection
+def video_stream():
+    return capture_and_detect()
 
 # Create Gradio interface
 webcam_interface = gr.Interface(
     fn=video_stream,
-    inputs=gr.Video(format="mp4", streaming=True),
+    inputs=None,
     outputs=gr.Image(),
     live=True,
-    description="Real-Time Object Detection with YOLO on Hugging Face"
+    description="Real-Time Object Detection with YOLO and Gradio"
 )
 
 # Launch Gradio app
@@ -49,3 +56,4 @@ if __name__ == "__main__":
     webcam_interface.launch()
 
 
+
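For a quick local check of the new OpenCV loop outside Gradio, a minimal sketch along these lines exercises the same webcam capture and hustvl/yolos-tiny pipeline. The PIL conversion and the cv2.imshow preview window are assumptions here, not part of the commit: some transformers versions expect a PIL image rather than a raw NumPy array, and the preview simply stands in for the gr.Image output.

# Hypothetical standalone test of the webcam + yolos-tiny loop (not part of the commit).
# Assumes a webcam at index 0 and a local display; press 'q' to quit.
import cv2
from PIL import Image
from transformers import pipeline

model = pipeline("object-detection", model="hustvl/yolos-tiny")

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Convert BGR -> RGB and wrap in a PIL image; some transformers versions
    # reject raw NumPy arrays as pipeline input.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = model(Image.fromarray(rgb_frame))

    # Draw each detection on the original BGR frame.
    for result in results:
        box = result['box']
        x1, y1 = int(box['xmin']), int(box['ymin'])
        x2, y2 = int(box['xmax']), int(box['ymax'])
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        text = f"{result['label']}: {result['score']:.2f}"
        cv2.putText(frame, text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    cv2.imshow("YOLO detections", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()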