kisa-misa committed on
Commit cb670b6 · 1 Parent(s): c82db4e

Create app.py

Files changed (1)
  1. app.py +164 -0
app.py ADDED
@@ -0,0 +1,164 @@
from transformers import AutoFeatureExtractor, YolosForObjectDetection
import gradio as gr
from PIL import Image
import torch
import matplotlib.pyplot as plt
import io
import numpy as np
import os
os.system("pip -qq install yoloxdetect==0.0.7")
from yoloxdetect import YoloxDetector

# Sample images (used below as Gradio examples)
torch.hub.download_url_to_file('https://tochkanews.ru/wp-content/uploads/2020/09/0.jpg', '1.jpg')
torch.hub.download_url_to_file('https://s.rdrom.ru/1/pubs/4/35893/1906770.jpg', '2.jpg')
torch.hub.download_url_to_file('https://static.mk.ru/upload/entities/2022/04/17/07/articles/detailPicture/5b/39/28/b6/ffb1aa636dd62c30e6ff670f84474f75.jpg', '3.jpg')


def yolox_inference(
    image_path: gr.inputs.Image = None,
    model_path: gr.inputs.Dropdown = 'kadirnar/yolox_s-v0.1.1',
    config_path: gr.inputs.Textbox = 'configs.yolox_s',
    image_size: gr.inputs.Slider = 640
):
    """
    YOLOX inference function.

    Args:
        image_path: Path to the input image
        model_path: Path to the model
        config_path: Path to the config file
        image_size: Image size
    Returns:
        Rendered image with the detected objects drawn
    """
    model = YoloxDetector(model_path, config_path=config_path, device="cpu", hf_model=True)
    pred = model.predict(image_path=image_path, image_size=image_size)
    return pred
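
# Standalone usage sketch (outside Gradio), assuming yoloxdetect can fetch the
# kadirnar/yolox_s-v0.1.1 weights from the Hub:
#   yolox_inference("1.jpg", "kadirnar/yolox_s-v0.1.1", "configs.yolox_s", 640)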

inputs = [
    gr.inputs.Image(type="filepath", label="Input Image"),
    gr.inputs.Dropdown(
        label="Model Path",
        choices=[
            "kadirnar/yolox_s-v0.1.1",
            "kadirnar/yolox_m-v0.1.1",
            "kadirnar/yolox_tiny-v0.1.1",
        ],
        default="kadirnar/yolox_s-v0.1.1",
    ),
    gr.inputs.Dropdown(
        label="Config Path",
        choices=[
            "configs.yolox_s",
            "configs.yolox_m",
            "configs.yolox_tiny",
        ],
        default="configs.yolox_s",
    ),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
]

outputs = gr.outputs.Image(type="filepath", label="Output Image")
title = "YOLOX is a high-performance anchor-free YOLO."

examples = [
    ["1.jpg", "kadirnar/yolox_m-v0.1.1", "configs.yolox_m", 640],
    ["2.jpg", "kadirnar/yolox_s-v0.1.1", "configs.yolox_s", 640],
    ["3.jpg", "kadirnar/yolox_tiny-v0.1.1", "configs.yolox_tiny", 640],
]

demo_app = gr.Interface(
    fn=yolox_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
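# Note: with debug=True, launch() blocks here, so the YOLOS demo defined below
# only starts once this first Gradio app is stopped.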
demo_app.launch(debug=True, enable_queue=True)

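# Second, independent demo in the same file: object detection with YOLOS
# (transformers), with results rendered via matplotlib.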
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
          [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]


def get_class_list_from_input(classes_string: str):
    if classes_string == "":
        return []
    classes_list = classes_string.split(",")
    classes_list = [x.strip() for x in classes_list]
    return classes_list
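# e.g. get_class_list_from_input("person, boat") -> ["person", "boat"]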

def infer(img, model_name: str, prob_threshold: float, classes_to_show: str = ""):
    feature_extractor = AutoFeatureExtractor.from_pretrained(f"hustvl/{model_name}")
    model = YolosForObjectDetection.from_pretrained(f"hustvl/{model_name}")

    img = Image.fromarray(img)

    pixel_values = feature_extractor(img, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values, output_attentions=True)

    # Keep only predictions whose best class score (excluding the no-object class) clears the threshold.
    probas = outputs.logits.softmax(-1)[0, :, :-1]
    keep = probas.max(-1).values > prob_threshold

    # Rescale the predicted boxes to the original image size.
    target_sizes = torch.tensor(img.size[::-1]).unsqueeze(0)
    postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes)
    bboxes_scaled = postprocessed_outputs[0]['boxes']

    classes_list = get_class_list_from_input(classes_to_show)
    res_img = plot_results(img, probas[keep], bboxes_scaled[keep], model, classes_list)

    return res_img

def plot_results(pil_img, prob, boxes, model, classes_list):
    plt.figure(figsize=(16, 10))
    plt.imshow(pil_img)
    ax = plt.gca()
    colors = COLORS * 100
    for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):
        cl = p.argmax()
        object_class = model.config.id2label[cl.item()]

        if len(classes_list) > 0:
            if object_class not in classes_list:
                continue

        ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                                   fill=False, color=c, linewidth=3))
        text = f'{object_class}: {p[cl]:0.2f}'
        ax.text(xmin, ymin, text, fontsize=15,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.axis('off')
    return fig2img(plt.gcf())

def fig2img(fig):
    buf = io.BytesIO()
    fig.savefig(buf)
    buf.seek(0)
    img = Image.open(buf)
    return img

description = """Object Detection with YOLOS. Choose your model and you're good to go.
You can adjust the minimum probability threshold with the slider.
Additionally, you can restrict the classes that will be shown by entering a comma-separated list of
[COCO classes](https://github.com/amikelive/coco-labels/blob/master/coco-labels-2014_2017.txt).
Leaving the field empty will show all classes."""
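
# For reference, a direct call (outside Gradio) would look roughly like this, assuming
# `sample` is an RGB image as a NumPy array (the format the Image component passes in):
#   result = infer(sample, "yolos-small", 0.9, "person, boat")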

image_in = gr.components.Image()
image_out = gr.components.Image()
model_choice = gr.components.Dropdown(["yolos-tiny", "yolos-small", "yolos-base", "yolos-small-300", "yolos-small-dwr"], value="yolos-small", label="YOLOS Model")
prob_threshold_slider = gr.components.Slider(minimum=0, maximum=1.0, step=0.01, value=0.9, label="Probability Threshold")
classes_to_show = gr.components.Textbox(placeholder="e.g. person, boat", label="Classes to use (empty means all classes)")

Iface = gr.Interface(
    fn=infer,
    inputs=[image_in, model_choice, prob_threshold_slider, classes_to_show],
    outputs=image_out,
    # examples=[["examples/10_People_Marching_People_Marching_2_120.jpg"], ["examples/12_Group_Group_12_Group_Group_12_26.jpg"], ["examples/43_Row_Boat_Canoe_43_247.jpg"]],
    title="Object Detection with YOLOS",
    description=description,
).launch()