ZunYin committed (verified)
Commit 3f185a6 · Parent(s): 6992879

Update app.py

Files changed (1): app.py (+20 -34)
app.py CHANGED
@@ -1,68 +1,54 @@
+import os
 import gradio as gr
 import cv2
-import requests
-import os
 from ultralytics import YOLO
 
-# Define file URLs for images and videos
-file_urls = [
-    'https://drive.google.com/file/d/1rvuphnn3BV4NdILrQE72jU7fxA79SiYn/view',  # Image
-    'https://drive.google.com/file/d/16gu9cLamGFrM5DRd1WJyk_6Xt9v0S7go/view',  # Image
-    'https://drive.google.com/file/d/1UgZi54js65f5qGhNF3nGLZwIN5nrUek6/view',  # Video
-]
+# Define the folder containing the images and video
+folder_path = "info"  # Replace with your folder name or path
+
+# Get list of files from the folder
+image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
+video_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.mp4', '.avi', '.mov'))]
 
-# Helper function to download files
-def download_file(url, save_name):
-    if not os.path.exists(save_name):
-        file = requests.get(url)
-        open(save_name, 'wb').write(file.content)
+# Ensure the folder contains the expected number of files
+if len(image_files) < 2 or len(video_files) < 1:
+    raise ValueError("Folder must contain at least 2 images and 1 video.")
 
-# Download example files
-for i, url in enumerate(file_urls):
-    if url.endswith(".mp4"):
-        download_file(url, f"video.mp4")
-    else:
-        download_file(url, f"image_{i}.jpg")
+# Select the first two images and the first video
+image_examples = [[image_files[0]], [image_files[1]]]
+video_examples = [[video_files[0]]]
 
 # Load the YOLO model
 model = YOLO('best.pt')
 
-# Define example paths for Gradio
-image_examples = [["image_0.jpg"], ["image_1.jpg"]]
-video_examples = [["video.mp4"]]
-
 # Function for processing images
 def show_preds_image(image_path):
     image = cv2.imread(image_path)
     results = model.predict(source=image_path)
-    annotated_image = results[0].plot()  # YOLO provides a built-in plot function
+    annotated_image = results[0].plot()
     return cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
 
 # Function for processing videos
 def show_preds_video(video_path):
     cap = cv2.VideoCapture(video_path)
-    out_frames = []  # List to store annotated frames
-
+    out_frames = []
+    fps = int(cap.get(cv2.CAP_PROP_FPS))
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
-
         results = model.predict(source=frame)
         annotated_frame = results[0].plot()
         out_frames.append(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
-
     cap.release()
-
+
     # Save the annotated video
     output_path = "annotated_video.mp4"
     height, width, _ = out_frames[0].shape
     fourcc = cv2.VideoWriter_fourcc(*"mp4v")
-    writer = cv2.VideoWriter(output_path, fourcc, 20, (width, height))
-
+    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
     for frame in out_frames:
         writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
-
     writer.release()
     return output_path
 
@@ -77,7 +63,7 @@ interface_image = gr.Interface(
     examples=image_examples,
 )
 
-inputs_video = gr.Video(label="Input Video")  # Removed type argument
+inputs_video = gr.Video(label="Input Video")
 outputs_video = gr.Video(label="Annotated Output")
 interface_video = gr.Interface(
     fn=show_preds_video,
@@ -91,4 +77,4 @@ interface_video = gr.Interface(
 gr.TabbedInterface(
     [interface_image, interface_video],
     tab_names=['Image Inference', 'Video Inference']
-).launch(share=True)
+).launch(share=True)
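
Note: the main behavioral fix in this commit is that show_preds_video now writes the annotated clip at the source frame rate (cv2.CAP_PROP_FPS) instead of a hard-coded 20 FPS. One remaining cost is that every annotated frame is still buffered in out_frames before the writer opens. Below is a minimal sketch of a streaming variant, not the committed code: it assumes the same best.pt weights, the function name annotate_video is hypothetical, and CAP_PROP_FPS can report 0 for some containers, hence the fallback.

import cv2
from ultralytics import YOLO

model = YOLO('best.pt')  # same weights file the app loads

def annotate_video(video_path, output_path="annotated_video.mp4"):
    cap = cv2.VideoCapture(video_path)
    # CAP_PROP_FPS may report 0 for some containers; fall back to a default.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # plot() returns a BGR array, which is what VideoWriter expects,
        # so each frame can be written directly with no RGB round-trip.
        writer.write(model.predict(source=frame)[0].plot())
    cap.release()
    writer.release()
    return output_path

Writing each frame as it is produced keeps memory use flat on long clips, and reading the dimensions from the capture properties avoids having to decode a first frame before opening the writer.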