# app.py — Hugging Face Space "2384603g" (commit 52f54e1, verified)
from ultralytics import YOLO
from PIL import Image
import gradio as gr
from huggingface_hub import snapshot_download
from tqdm.auto import tqdm
import os
import tempfile
import cv2 # OpenCV for video processing
# Subdirectory inside the downloaded Hub snapshot that holds the exported
# OpenVINO model (previously duplicated as a hard-coded string below).
model_path = "best_int8_openvino_model"


def load_model(repo_id):
    """Download a model snapshot from the Hugging Face Hub and load it.

    Args:
        repo_id: Hub repository id (e.g. "user/repo") whose snapshot
            contains the exported OpenVINO model under ``model_path``.

    Returns:
        YOLO: a detection model ready for inference.
    """
    download_dir = snapshot_download(repo_id)
    # Reuse the module-level constant instead of repeating the literal.
    path = os.path.join(download_dir, model_path)
    return YOLO(path, task='detect')
def process_image(pilimg, conf=0.5):
    """Run YOLO detection on a PIL image and return an annotated PIL image.

    Args:
        pilimg: input image in RGB order (as supplied by ``gr.Image(type="pil")``).
        conf: minimum confidence threshold for detections (default 0.5,
            matching the original hard-coded value).

    Returns:
        PIL.Image.Image: the input image with detection boxes drawn, RGB order.
    """
    result = detection_model.predict(pilimg, conf=conf)
    # Ultralytics' plot() returns a BGR numpy array; reverse the channel
    # axis (BGR -> RGB) before wrapping it as a PIL image.
    img_bgr = result[0].plot()
    return Image.fromarray(img_bgr[..., ::-1])
def process_video(video):
    """Run YOLO detection on each frame of a video and write an annotated copy.

    Args:
        video: filesystem path to the input video (as supplied by ``gr.Video``).

    Returns:
        str: path to the annotated ``.mp4`` written to a fresh temp directory.
    """
    video_reader = cv2.VideoCapture(video)
    try:
        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Some containers report 0 fps, which would make VideoWriter useless;
        # fall back to a sane default in that case.
        fps = video_reader.get(cv2.CAP_PROP_FPS) or 30.0

        output_path = os.path.join(tempfile.mkdtemp(), "annotated_video.mp4")
        video_writer = cv2.VideoWriter(output_path,
                                       cv2.VideoWriter_fourcc(*'mp4v'),
                                       fps,
                                       (frame_w, frame_h))
        try:
            for _ in tqdm(range(nb_frames)):
                success, frame = video_reader.read()
                if not success:
                    # Stop instead of spinning through the remaining reported
                    # frame count once the stream is exhausted or corrupt.
                    break
                results = detection_model.predict(frame, conf=0.5)
                # plot() returns a BGR array, which is what VideoWriter expects.
                video_writer.write(results[0].plot())
        finally:
            video_writer.release()
    finally:
        # Always release the reader, even if inference raises mid-video.
        # (GUI cleanup calls removed: this runs headless on a server.)
        video_reader.release()
    return output_path
# Hub repository hosting the exported Violin/Guzheng detection model.
REPO_ID = "2384603g/violin_GuZheng_WM"
# Loaded once at startup (downloads the snapshot); shared by both
# process_image and process_video.
detection_model = load_model(REPO_ID)
# HTML banner rendered at the top of the app.
message = "<h1>Welcome to the Image and Video Upload App For Violin & Guzheng!</h1><br>Done By Tang Wei Ming (2384603G)<br><p>Please upload an image or a video of Violin & Guzheng to get started.</p>"

# One sub-interface per media type; both delegate to the shared model.
image_interface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
)
video_interface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload a Video"),
    outputs="video",
)

# Assemble the page: banner on top, the two interfaces as tabs below.
with gr.Blocks() as app:
    gr.HTML(message)
    gr.TabbedInterface(
        [image_interface, video_interface],
        tab_names=["Image Upload", "Video Upload"],
    )

# Start the Gradio server.
app.launch()