sagar007 committed
Commit ffb15d8 · verified · 1 Parent(s): 7e75a5c

Upload 2 files

Files changed (2)
  1. app.py +122 -0
  2. model.py +305 -0
app.py ADDED
@@ -0,0 +1,122 @@
import gradio as gr
import torch
from PIL import Image
import os
import yolov9


# Load your custom-trained model
model_name = './best.pt'
# model = YOLO(model_name)
# Images

def yolov9_inference(img_path, image_size, conf_threshold, iou_threshold):
    """
    Load a YOLOv9 model, configure it, and perform inference on an image at the
    requested input size.

    :param img_path: Path to the image file.
    :param image_size: Input size for inference.
    :param conf_threshold: Confidence threshold for NMS.
    :param iou_threshold: IoU threshold for NMS.
    :return: The rendered image (a numpy array) with detection boxes drawn on it.
    """
    # Load the model
    # model_path = download_models(model_id)
    model = yolov9.load('./best.pt')

    # Set model parameters
    model.conf = conf_threshold
    model.iou = iou_threshold

    # Perform inference
    results = model(img_path, size=image_size)

    # Draw detection bounding boxes on the image
    output = results.render()

    return output[0]


def app():
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                img_path = gr.Image(type="filepath", label="Image")
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                yolov9_infer = gr.Button(value="Inference")

            with gr.Column():
                output_numpy = gr.Image(type="numpy", label="Output")

        yolov9_infer.click(
            fn=yolov9_inference,
            inputs=[
                img_path,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_numpy])
        gr.Examples(
            examples=[
                [
                    "./batmanvssuperman.jpg",
                    640,
                    0.4,
                    0.5,
                ],
                [
                    "./ironman.jpg",
                    640,
                    0.4,
                    0.5,
                ],
            ],
            fn=yolov9_inference,
            inputs=[
                img_path,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_numpy],
            cache_examples=True,
        )


gradio_app = gr.Blocks()
with gradio_app:
    gr.HTML(
        """
        <h1 style='text-align: center'>
        YOLOv9: Batman, Superman and Ironman Detector
        </h1>
        """)
    with gr.Row():
        with gr.Column():
            app()

gradio_app.launch(debug=True, share=True)
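
For reference, a minimal sketch of the same inference flow outside the Gradio UI. It assumes the yolov9 pip package used above plus the bundled best.pt and ironman.jpg files, and is not part of the committed app:

# Standalone inference sketch (assumes the yolov9 pip package, ./best.pt and ./ironman.jpg)
import yolov9
from PIL import Image

model = yolov9.load('./best.pt')   # custom Batman/Superman/Ironman weights
model.conf = 0.4                   # confidence threshold (slider default above)
model.iou = 0.5                    # IoU threshold for NMS (slider default above)
results = model('./ironman.jpg', size=640)
rendered = results.render()[0]     # numpy array with boxes drawn, same as the app output
Image.fromarray(rendered).save('prediction.jpg')
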
model.py ADDED
@@ -0,0 +1,305 @@
# Imports needed by DetectMultiBackend; LOGGER, ROOT, the check_* helpers, xywh2xyxy and
# yaml_load are assumed to come from the YOLOv9 repository's utils package alongside this file.
import ast
import contextlib
import json
import platform
import zipfile
from collections import OrderedDict, namedtuple
from pathlib import Path
from urllib.parse import urlparse

import cv2
import numpy as np
import torch
import torch.nn as nn
from PIL import Image

from utils.general import (LOGGER, ROOT, check_requirements, check_suffix, check_version,
                           xywh2xyxy, yaml_load)


class DetectMultiBackend(nn.Module):
    # YOLO MultiBackend class for python inference on various backends
    def __init__(self, weights='yolo.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
        # Usage:
        #   PyTorch:                weights = *.pt
        #   TorchScript:                      *.torchscript
        #   ONNX Runtime:                     *.onnx
        #   ONNX OpenCV DNN:                  *.onnx --dnn
        #   OpenVINO:                         *_openvino_model
        #   CoreML:                           *.mlmodel
        #   TensorRT:                         *.engine
        #   TensorFlow SavedModel:            *_saved_model
        #   TensorFlow GraphDef:              *.pb
        #   TensorFlow Lite:                  *.tflite
        #   TensorFlow Edge TPU:              *_edgetpu.tflite
        #   PaddlePaddle:                     *_paddle_model
        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import

        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
        fp16 &= pt or jit or onnx or engine  # FP16
        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCHW)
        stride = 32  # default stride
        cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
        if not (pt or triton):
            w = attempt_download(w)  # download if not local

        if pt:  # PyTorch
            model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
            stride = max(int(model.stride.max()), 32)  # model stride
            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
            model.half() if fp16 else model.float()
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
        elif jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # model metadata
            model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
            model.half() if fp16 else model.float()
            if extra_files['config.txt']:  # load metadata dict
                d = json.loads(extra_files['config.txt'],
                               object_hook=lambda d: {int(k) if k.isdigit() else k: v
                                                      for k, v in d.items()})
                stride, names = int(d['stride']), d['names']
        elif dnn:  # ONNX OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements('opencv-python>=4.5.4')
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
            output_names = [x.name for x in session.get_outputs()]
            meta = session.get_modelmeta().custom_metadata_map  # metadata
            if 'stride' in meta:
                stride, names = int(meta['stride']), eval(meta['names'])
        elif xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch
            ie = Core()
            if not Path(w).is_file():  # if not *.xml
                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
            if network.get_parameters()[0].get_layout().empty:
                network.get_parameters()[0].set_layout(Layout("NCHW"))
            batch_dim = get_batch(network)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            executable_network = ie.compile_model(network, device_name="CPU")  # device_name="MYRIAD" for Intel NCS2
            stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
            if device.type == 'cpu':
                device = torch.device('cuda:0')
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                model = runtime.deserialize_cuda_engine(f.read())
            context = model.create_execution_context()
            bindings = OrderedDict()
            output_names = []
            fp16 = False  # default updated below
            dynamic = False
            for i in range(model.num_bindings):
                name = model.get_binding_name(i)
                dtype = trt.nptype(model.get_binding_dtype(i))
                if model.binding_is_input(i):
                    if -1 in tuple(model.get_binding_shape(i)):  # dynamic
                        dynamic = True
                        context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
                    if dtype == np.float16:
                        fp16 = True
                else:  # output
                    output_names.append(name)
                shape = tuple(context.get_binding_shape(i))
                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
                bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
            batch_size = bindings['images'].shape[0]  # if dynamic, this is instead max batch size
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
        elif saved_model:  # TF SavedModel
            LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
            import tensorflow as tf
            keras = False  # assume TF1 saved_model
            model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
        elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
            LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
            import tensorflow as tf

            def wrap_frozen_graph(gd, inputs, outputs):
                x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), [])  # wrapped
                ge = x.graph.as_graph_element
                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))

            def gd_outputs(gd):
                name_list, input_list = [], []
                for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
                    name_list.append(node.name)
                    input_list.extend(node.input)
                return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))

            gd = tf.Graph().as_graph_def()  # TF GraphDef
            with open(w, 'rb') as f:
                gd.ParseFromString(f.read())
            frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
        elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                from tflite_runtime.interpreter import Interpreter, load_delegate
            except ImportError:
                import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
            if edgetpu:  # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
                LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                delegate = {
                    'Linux': 'libedgetpu.so.1',
                    'Darwin': 'libedgetpu.1.dylib',
                    'Windows': 'edgetpu.dll'}[platform.system()]
                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
            else:  # TFLite
                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
                interpreter = Interpreter(model_path=w)  # load TFLite model
            interpreter.allocate_tensors()  # allocate
            input_details = interpreter.get_input_details()  # inputs
            output_details = interpreter.get_output_details()  # outputs
            # load metadata
            with contextlib.suppress(zipfile.BadZipFile):
                with zipfile.ZipFile(w, "r") as model:
                    meta_file = model.namelist()[0]
                    meta = ast.literal_eval(model.read(meta_file).decode("utf-8"))
                    stride, names = int(meta['stride']), meta['names']
        elif tfjs:  # TF.js
            raise NotImplementedError('ERROR: YOLO TF.js inference is not supported')
        elif paddle:  # PaddlePaddle
            LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
            import paddle.inference as pdi
            if not Path(w).is_file():  # if not *.pdmodel
                w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
            weights = Path(w).with_suffix('.pdiparams')
            config = pdi.Config(str(w), str(weights))
            if cuda:
                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
            predictor = pdi.create_predictor(config)
            input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
            output_names = predictor.get_output_names()
        elif triton:  # NVIDIA Triton Inference Server
            LOGGER.info(f'Using {w} as Triton Inference Server...')
            check_requirements('tritonclient[all]')
            from utils.triton import TritonRemoteModel
            model = TritonRemoteModel(url=w)
            nhwc = model.runtime.startswith("tensorflow")
        else:
            raise NotImplementedError(f'ERROR: {w} is not a supported format')

        # class names
        if 'names' not in locals():
            names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
        if names[0] == 'n01440764' and len(names) == 1000:  # ImageNet
            names = yaml_load(ROOT / 'data/ImageNet.yaml')['names']  # human-readable names

        self.__dict__.update(locals())  # assign all variables to self

    def forward(self, im, augment=False, visualize=False):
        # YOLO MultiBackend inference
        b, ch, h, w = im.shape  # batch, channel, height, width
        if self.fp16 and im.dtype != torch.float16:
            im = im.half()  # to FP16
        if self.nhwc:
            im = im.permute(0, 2, 3, 1)  # torch BCHW to numpy BHWC shape(1,320,192,3)

        if self.pt:  # PyTorch
            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
        elif self.jit:  # TorchScript
            y = self.model(im)
        elif self.dnn:  # ONNX OpenCV DNN
            im = im.cpu().numpy()  # torch to numpy
            self.net.setInput(im)
            y = self.net.forward()
        elif self.onnx:  # ONNX Runtime
            im = im.cpu().numpy()  # torch to numpy
            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
        elif self.xml:  # OpenVINO
            im = im.cpu().numpy()  # FP32
            y = list(self.executable_network([im]).values())
        elif self.engine:  # TensorRT
            if self.dynamic and im.shape != self.bindings['images'].shape:
                i = self.model.get_binding_index('images')
                self.context.set_binding_shape(i, im.shape)  # reshape if dynamic
                self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
                for name in self.output_names:
                    i = self.model.get_binding_index(name)
                    self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
            s = self.bindings['images'].shape
            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
            self.binding_addrs['images'] = int(im.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            y = [self.bindings[x].data for x in sorted(self.output_names)]
        elif self.coreml:  # CoreML
            im = im.cpu().numpy()
            im = Image.fromarray((im[0] * 255).astype('uint8'))
            # im = im.resize((192, 320), Image.ANTIALIAS)
            y = self.model.predict({'image': im})  # coordinates are xywh normalized
            if 'confidence' in y:
                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(float)
                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
            else:
                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)
        elif self.paddle:  # PaddlePaddle
            im = im.cpu().numpy().astype(np.float32)
            self.input_handle.copy_from_cpu(im)
            self.predictor.run()
            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
        elif self.triton:  # NVIDIA Triton Inference Server
            y = self.model(im)
        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
            im = im.cpu().numpy()
            if self.saved_model:  # SavedModel
                y = self.model(im, training=False) if self.keras else self.model(im)
            elif self.pb:  # GraphDef
                y = self.frozen_func(x=self.tf.constant(im))
            else:  # Lite or Edge TPU
                input = self.input_details[0]
                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
                if int8:
                    scale, zero_point = input['quantization']
                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
                self.interpreter.set_tensor(input['index'], im)
                self.interpreter.invoke()
                y = []
                for output in self.output_details:
                    x = self.interpreter.get_tensor(output['index'])
                    if int8:
                        scale, zero_point = output['quantization']
                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale
                    y.append(x)
                y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
                y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels

        if isinstance(y, (list, tuple)):
            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
        else:
            return self.from_numpy(y)

    def from_numpy(self, x):
        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x

    def warmup(self, imgsz=(1, 3, 640, 640)):
        # Warmup model by running inference once
        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
        if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
            for _ in range(2 if self.jit else 1):
                self.forward(im)  # warmup

    @staticmethod
    def _model_type(p='path/to/model.pt'):
        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
        # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
        from export import export_formats
        from utils.downloads import is_url
        sf = list(export_formats().Suffix)  # export suffixes
        if not is_url(p, check=False):
            check_suffix(p, sf)  # checks
        url = urlparse(p)  # if url may be Triton inference server
        types = [s in Path(p).name for s in sf]
        types[8] &= not types[9]  # tflite &= not edgetpu
        triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc])
        return types + [triton]

    @staticmethod
    def _load_metadata(f=Path('path/to/meta.yaml')):
        # Load metadata from meta.yaml if it exists
        if f.exists():
            d = yaml_load(f)
            return d['stride'], d['names']  # assign stride, names
        return None, None
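
For context, a minimal sketch of how DetectMultiBackend is typically driven. This mirrors the YOLOv9 repository's detect.py rather than anything committed here; letterbox, non_max_suppression and scale_boxes are assumed to be available from that repo's utils package, and output handling can differ slightly per model head:

# Usage sketch (assumes the YOLOv9 repo's utils package is available next to model.py)
import cv2
import numpy as np
import torch

from model import DetectMultiBackend
from utils.augmentations import letterbox            # assumed repo helper
from utils.general import non_max_suppression, scale_boxes  # assumed repo helpers

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = DetectMultiBackend('best.pt', device=device)
model.warmup(imgsz=(1, 3, 640, 640))                  # one dummy pass to initialise the backend

img0 = cv2.imread('ironman.jpg')                      # original BGR HWC image
img = letterbox(img0, 640, stride=model.stride, auto=model.pt)[0]  # resize and pad
img = img.transpose((2, 0, 1))[::-1]                  # HWC BGR -> CHW RGB
im = torch.from_numpy(np.ascontiguousarray(img)).to(device).float() / 255.0
im = im[None]                                         # add batch dimension

pred = model(im)                                      # raw predictions from forward()
det = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5)[0]
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], img0.shape).round()  # back to original pixels
for *xyxy, conf, cls in det:
    print(model.names[int(cls)], float(conf), [int(x) for x in xyxy])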