Commit d8186bf by eeshawn
Parent(s): 028df33

updated app.py

Files changed:
- app.py (+54, -31)
- requirements.txt (+0, -1)
app.py CHANGED
@@ -2,47 +2,70 @@ import gradio as gr
 import torch
 from ultralyticsplus import YOLO, render_result
 
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
 yolo_model = YOLO('eeshawn11/naruto_hand_seal_detection')
-yolo_model.
-
-yolo_model.overrides['max_det'] = 1
+yolo_model.overrides['max_det'] = 10
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
 yolo_model.to(device)
 
-def
-    image
-    conf_threshold
-    iou_threshold: gr.Slider = 0.45,
+def seal_detection(
+    image,
+    conf_threshold,
 ):
     """
-    YOLOv8
+    Object detection with YOLOv8 model, detecting basic Naruto hand seals.
+
     Args:
         image: Input image
-        model_path: Path to the model
         conf_threshold: Confidence threshold
-        iou_threshold: IOU threshold
     Returns:
         Rendered image
     """
-    results = yolo_model.predict(image)
+    results = yolo_model.predict(image, conf=conf_threshold)
     render = render_result(model=yolo_model, image=image, result=results[0])
     return render
-
-
-
-gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+def clear():
+    image_upload = gr.update(value=None)
+    conf_slider = gr.update(value=0.5)
+    return image_upload, conf_slider
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Naruto Hand Seal Detection with YOLOv8")
+    gr.Markdown(
+        """
+        ### Introduction
+
+        As a data science practitioner and fan of Japanese manga, I was eager to apply my skills to a project that combined my interests. I decided to develop a computer vision model that could detect hand seals from the **Naruto** anime.
+
+        Hand seals are an integral part of the Naruto universe, used by characters to activate powerful techniques. There are twelve basic seals, each named after an animal in the Chinese Zodiac, and different sequences of hand seals are required for different techniques.
+
+        As a fan of the series, I knew that accurately detecting and classifying hand seals would be a difficult but rewarding challenge, and I was excited to tackle it using my expertise in machine learning and computer vision.
+
+        ### Problem Statement
+
+        The challenge was to develop a model that could accurately identify the hand seal being performed.
+
+        ### Methodology
+
+        In this project, I leveraged transfer learning from the [YOLOv8](https://github.com/ultralytics/ultralytics) model to customize an object detection model specifically for the hand seals.
+
+        There were several challenges during the development process, including limited available datasets of labelled images for training, so I had to create my own through a mix of screenshots from YouTube videos, as well as capturing images of myself performing the seals.
+        """
+    )
+    with gr.Row():
+        with gr.Column():
+            inputs = [
+                gr.Image(source="upload", type="pil", label="Image Upload", interactive=True),
+                gr.Slider(minimum=0.05, maximum=1.0, value=0.5, step=0.05, label="Confidence Threshold"),
+            ]
+            with gr.Row():
+                clear_form = gr.Button("Reset")
+                submit = gr.Button("Predict")
+        outputs = gr.Image(type="filepath", label="Output Image", interactive=False)
+    gr.Markdown("Happy to connect on [LinkedIn](https://www.linkedin.com/in/shawn-sing/) or visit my [GitHub](https://github.com/eeshawn11/) to check out my other projects.")
+
+    clear_form.click(fn=clear, inputs=None, outputs=inputs)
+    submit.click(fn=seal_detection, inputs=inputs, outputs=outputs)
+
+demo.queue(api_open=False, max_size=10)
+demo.launch()
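For a quick sanity check of the updated inference call outside Gradio, a minimal sketch along these lines should work; the test image filename and the output path are placeholders, and it loads the model directly rather than reusing the app's module-level yolo_model:

# Minimal local smoke test of the updated predict path (not part of this commit).
# Assumes ultralyticsplus can fetch the 'eeshawn11/naruto_hand_seal_detection' weights
# and that 'seal_test.jpg' is any local image of a hand seal (placeholder name).
from PIL import Image
from ultralyticsplus import YOLO, render_result

model = YOLO('eeshawn11/naruto_hand_seal_detection')
model.overrides['max_det'] = 10                      # same detection cap as the updated app

image = Image.open('seal_test.jpg')
results = model.predict(image, conf=0.5)             # 0.5 mirrors the app's slider default
render = render_result(model=model, image=image, result=results[0])
render.save('seal_test_annotated.png')               # render_result returns a PIL image
print(results[0].boxes)                              # detected boxes, class ids and confidences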
requirements.txt CHANGED
@@ -1,2 +1 @@
-ultralytics~=8.0.4
 ultralyticsplus==0.0.28
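Dropping the explicit ultralytics pin presumably relies on ultralyticsplus==0.0.28 pulling in a compatible ultralytics release as its own dependency; a quick way to confirm which versions actually get installed:

# Quick check (not part of this commit) of what the single remaining requirement resolves to.
from importlib.metadata import version

print("ultralyticsplus:", version("ultralyticsplus"))
print("ultralytics:", version("ultralytics"))  # installed transitively, no longer pinned directly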