AlshimaaGamalAlsaied committed
Commit 45f154c · 1 Parent(s): f8fc251
Files changed (6)
  1. app.py +116 -0
  2. img1.png +0 -0
  3. img2.png +0 -0
  4. img3.png +0 -0
  5. requirements.txt +4 -0
  6. yolo5_epoch100 +1 -0
app.py ADDED
@@ -0,0 +1,116 @@
+ # import gradio as gr
+ # import torch
+ # import yolov5
+
+ # # Images
+ # torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+ # torch.hub.download_url_to_file('https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/image3.jpg', 'image3.jpg')
+
+ # def yolov5_inference(
+ #     image: gr.inputs.Image = None,
+ #     model_path: gr.inputs.Dropdown = None,
+ #     image_size: gr.inputs.Slider = 640,
+ #     conf_threshold: gr.inputs.Slider = 0.25,
+ #     iou_threshold: gr.inputs.Slider = 0.45,
+ # ):
+ #     """
+ #     YOLOv5 inference function
+ #     Args:
+ #         image: Input image
+ #         model_path: Path to the model
+ #         image_size: Image size
+ #         conf_threshold: Confidence threshold
+ #         iou_threshold: IOU threshold
+ #     Returns:
+ #         Rendered image
+ #     """
+ #     model = yolov5.load(model_path, device="cpu")
+ #     model.conf = conf_threshold
+ #     model.iou = iou_threshold
+ #     results = model([image], size=image_size)
+ #     return results.render()[0]
+
+
+ # inputs = [
+ #     gr.inputs.Image(type="pil", label="Input Image"),
+ #     gr.inputs.Dropdown(["yolov5s.pt", "yolov5l.pt", "yolov5x.pt"], label="Model"),
+ #     gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
+ #     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
+ #     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
+ # ]
+
+ # outputs = gr.outputs.Image(type="filepath", label="Output Image")
+ # title = "YOLOv5"
+ # description = "YOLOv5 is a family of object detection models pretrained on the COCO dataset. This app wraps the pip implementation of the original YOLOv5 model."
+
+ # examples = [['zidane.jpg', 'yolov5s.pt', 640, 0.25, 0.45], ['image3.jpg', 'yolov5s.pt', 640, 0.25, 0.45]]
+ # demo_app = gr.Interface(
+ #     fn=yolov5_inference,
+ #     inputs=inputs,
+ #     outputs=outputs,
+ #     title=title,
+ #     examples=examples,
+ #     cache_examples=True,
+ #     live=True,
+ #     theme='huggingface',
+ # )
+ # demo_app.launch(debug=True, enable_queue=True)
+ import gradio as gr
+ import torch  # used only by the commented-out example downloads below
+ import yolov5
+
+
+ # Images
+ # torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+ # torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
+
+ def image_fn(
+     image: gr.inputs.Image = None,
+     model_path: gr.inputs.Dropdown = None,
+     image_size: gr.inputs.Slider = 640,
+     conf_threshold: gr.inputs.Slider = 0.25,
+     iou_threshold: gr.inputs.Slider = 0.45,
+ ):
+     """
+     YOLOv5 inference function
+     Args:
+         image: Input image
+         model_path: Path to the model (local weights or a Hugging Face Hub model id)
+         image_size: Image size
+         conf_threshold: Confidence threshold
+         iou_threshold: IOU threshold
+     Returns:
+         Rendered image
+     """
+     model = yolov5.load(model_path, device="cpu", hf_model=True, trace=False)
+     model.conf = conf_threshold
+     model.iou = iou_threshold
+     results = model([image], size=image_size)
+     return results.render()[0]
+
+
+ demo_app = gr.Interface(
+     fn=image_fn,
+     inputs=[
+         gr.inputs.Image(type="pil", label="Input Image"),
+         gr.inputs.Dropdown(
+             choices=[
+                 "alshimaa/yolo5_epoch100",
+                 # "kadirnar/yolov7-v0.1",
+             ],
+             default="alshimaa/yolo5_epoch100",
+             label="Model",
+         ),
+         # gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
+         # gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
+         # gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
+     ],
+     outputs=gr.outputs.Image(type="filepath", label="Output Image"),
+     title="Object Detector: Identify People Without Masks",
+     # Each example row supplies one value per active input (image, model); size and thresholds fall back to the function defaults.
+     examples=[['img1.png', 'alshimaa/yolo5_epoch100'], ['img2.png', 'alshimaa/yolo5_epoch100'], ['img3.png', 'alshimaa/yolo5_epoch100']],
+     cache_examples=True,
+     live=True,
+     theme='huggingface',
+ )
+ demo_app.launch(debug=True, enable_queue=True)
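
For reference, a minimal standalone sketch of the inference path that image_fn wraps, run outside Gradio. It reuses the calls from app.py (yolov5.load with the Hub model id, model.conf/model.iou, results.render()); results.print() and the Pillow round-trip are additions based on the upstream YOLOv5 Detections API, not code from this commit:

import yolov5
from PIL import Image

# Load the fine-tuned weights from the Hub, exactly as image_fn does.
model = yolov5.load("alshimaa/yolo5_epoch100", device="cpu", hf_model=True, trace=False)
model.conf = 0.25  # confidence threshold
model.iou = 0.45   # IoU threshold for NMS

# img1.png ships with this repo (see the files-changed list above).
results = model([Image.open("img1.png")], size=640)
results.print()  # print a detection summary to stdout
Image.fromarray(results.render()[0]).save("out.png")  # save the annotated image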
img1.png ADDED
img2.png ADDED
img3.png ADDED
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio  # app.py relies on the legacy gr.inputs/gr.outputs API (pre-4.x Gradio)
+ torch
+ yolov5
+ huggingface_hub
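
huggingface_hub is presumably what lets yolov5.load(..., hf_model=True) pull the alshimaa/yolo5_epoch100 weights from the Hub. A quick sanity check that the model repo is reachable; this sketch uses the standard huggingface_hub client, which app.py itself never calls directly:

from huggingface_hub import list_repo_files

# List the files in the model repo referenced by app.py.
print(list_repo_files("alshimaa/yolo5_epoch100"))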
yolo5_epoch100 ADDED
@@ -0,0 +1 @@
+ Subproject commit c2212ddb924a66157b32a3af4f71b35c3d4c23fb