Dataset Preview
Full Screen Viewer
Full Screen
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed
Error code: DatasetGenerationError Exception: ParserError Message: Error tokenizing data. C error: EOF inside string starting at row 30639 Traceback: Traceback (most recent call last): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1854, in _prepare_split_single for _, table in generator: File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/csv/csv.py", line 190, in _generate_tables for batch_idx, df in enumerate(csv_file_reader): File "/src/services/worker/.venv/lib/python3.9/site-packages/pandas/io/parsers/readers.py", line 1843, in __next__ return self.get_chunk() File "/src/services/worker/.venv/lib/python3.9/site-packages/pandas/io/parsers/readers.py", line 1985, in get_chunk return self.read(nrows=size) File "/src/services/worker/.venv/lib/python3.9/site-packages/pandas/io/parsers/readers.py", line 1923, in read ) = self._engine.read( # type: ignore[attr-defined] File "/src/services/worker/.venv/lib/python3.9/site-packages/pandas/io/parsers/c_parser_wrapper.py", line 234, in read chunks = self._reader.read_low_memory(nrows) File "parsers.pyx", line 850, in pandas._libs.parsers.TextReader.read_low_memory File "parsers.pyx", line 905, in pandas._libs.parsers.TextReader._read_rows File "parsers.pyx", line 874, in pandas._libs.parsers.TextReader._tokenize_rows File "parsers.pyx", line 891, in pandas._libs.parsers.TextReader._check_tokenize_status File "parsers.pyx", line 2061, in pandas._libs.parsers.raise_parser_error pandas.errors.ParserError: Error tokenizing data. C error: EOF inside string starting at row 30639 The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1420, in compute_config_parquet_and_info_response parquet_operations = convert_to_parquet(builder) File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1052, in convert_to_parquet builder.download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare self._download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the dataset") from e datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
X
string | y
string |
---|---|
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import ast
import json
import platform
import zipfile
from collections import OrderedDict, namedtuple
from pathlib import Path
import cv2
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from ultralytics.utils import ARM64, IS_JETSON, IS_RASPBERRYPI, LINUX, LOGGER, PYTHON_VERSION, ROOT, yaml_load
from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml, is_rockchip
from ultralytics.utils.downloads import attempt_download_asset, is_url
def check_class_names(names):
"""
Check class names.
Map imagenet class codes to human-readable names if required. Convert lists to dicts.
"""
if isinstance(names, list): # names is a list
names = dict(enumerate(names)) # convert to dict
if isinstance(names, dict):
# Convert 1) string keys to int, i.e. '0' to 0, and non-string values to strings, i.e. True to 'True'
names = {int(k): str(v) for k, v in names.items()}
n = len(names)
if max(names.keys()) >= n:
raise KeyError(
f"{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices "
f"{min(names.keys())}-{max(names.keys())} defined in your dataset YAML."
)
if isinstance(names[0], str) and names[0].startswith("n0"): # imagenet class codes, i.e. 'n01440764'
names_map = yaml_load(ROOT / "cfg/datasets/ImageNet.yaml")["map"] # human-readable names
names = {k: names_map[v] for k, v in names.items()}
return names
def default_class_names(data=None):
"""Applies default class names to an input YAML file or returns numerical class names."""
if data:
try:
return yaml_load(check_yaml(data))["names"]
except Exception:
pass
return {i: f"class{i}" for i in range(999)} # return default if above errors
class AutoBackend(nn.Module):
"""
Handles dynamic backend selection for running inference using Ultralytics YOLO [MASK] s.
The AutoBackend class is designed to provide an abstraction layer for various inference engines. It supports a wide
range of formats, each with specific naming conventions as outlined below:
Supported Formats and Naming Conventions:
| Format | File Suffix |
| --------------------- | ----------------- |
| PyTorch | *.pt |
| TorchScript | *.torchscript |
| ONNX Runtime | *.onnx |
| ONNX OpenCV DNN | *.onnx (dnn=True) |
| OpenVINO | *openvino_ [MASK] / |
| CoreML | *.mlpackage |
| TensorRT | *.engine |
| TensorFlow SavedModel | *_saved_ [MASK] / |
| TensorFlow GraphDef | *.pb |
| TensorFlow Lite | *.tflite |
| TensorFlow Edge TPU | *_edgetpu.tflite |
| PaddlePaddle | *_paddle_ [MASK] / |
| MNN | *.mnn |
| NCNN | *_ncnn_ [MASK] / |
| IMX | *_imx_ [MASK] / |
| RKNN | *_rknn_ [MASK] / |
This class offers dynamic backend switching capabilities based on the input [MASK] format, making it easier to deploy
[MASK] s across various platforms.
"""
@torch.no_grad()
def __init__(
self,
weights="yolo11n.pt",
device=torch.device("cpu"),
dnn=False,
data=None,
fp16=False,
batch=1,
fuse=True,
verbose=True,
):
"""
Initialize the AutoBackend for inference.
Args:
weights (str | torch.nn.Module): Path to the [MASK] weights file or a module instance. Defaults to 'yolo11n.pt'.
device (torch.device): Device to run the [MASK] on. Defaults to CPU.
dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional.
fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
batch (int): Batch-size to assume for inference.
fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
verbose (bool): Enable verbose logging. Defaults to True.
"""
super().__init__()
w = str(weights[0] if isinstance(weights, list) else weights)
nn_module = isinstance(weights, torch.nn.Module)
(
pt,
jit,
onnx,
xml,
engine,
coreml,
saved_ [MASK] ,
pb,
tflite,
edgetpu,
tfjs,
paddle,
mnn,
ncnn,
imx,
rknn,
triton,
) = self._ [MASK] _type(w)
fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16
nhwc = coreml or saved_ [MASK] or pb or tflite or edgetpu or rknn # BHWC formats (vs torch BCWH)
stride = 32 # default stride
end2end = False # default end2end
[MASK] , metadata, task = None, None, None
# Set device
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA
if cuda and not any([nn_module, pt, jit, engine, onnx, paddle]): # GPU dataloader formats
device = torch.device("cpu")
cuda = False
# Download if not local
if not (pt or triton or nn_module):
w = attempt_download_asset(w)
# In-memory PyTorch [MASK]
if nn_module:
[MASK] = weights.to(device)
if fuse:
[MASK] = [MASK] .fuse(verbose=verbose)
if hasattr( [MASK] , "kpt_shape"):
kpt_shape = [MASK] .kpt_shape # pose-only
stride = max(int( [MASK] .stride.max()), 32) # [MASK] stride
names = [MASK] .module.names if hasattr( [MASK] , "module") else [MASK] .names # get class names
[MASK] .half() if fp16 else [MASK] .float()
self. [MASK] = [MASK] # explicitly assign for to(), cpu(), cuda(), half()
pt = True
# PyTorch
elif pt:
from ultralytics.nn.tasks import attempt_load_weights
[MASK] = attempt_load_weights(
weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse
)
if hasattr( [MASK] , "kpt_shape"):
kpt_shape = [MASK] .kpt_shape # pose-only
stride = max(int( [MASK] .stride.max()), 32) # [MASK] stride
names = [MASK] .module.names if hasattr( [MASK] , "module") else [MASK] .names # get class names
[MASK] .half() if fp16 else [MASK] .float()
self. [MASK] = [MASK] # explicitly assign for to(), cpu(), cuda(), half()
# TorchScript
elif jit:
LOGGER.info(f"Loading {w} for TorchScript inference...")
extra_files = {"config.txt": ""} # [MASK] metadata
[MASK] = torch.jit.load(w, _extra_files=extra_files, map_location=device)
[MASK] .half() if fp16 else [MASK] .float()
if extra_files["config.txt"]: # load metadata dict
metadata = json.loads(extra_files["config.txt"], object_hook=lambda x: dict(x.items()))
# ONNX OpenCV DNN
elif dnn:
LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...")
check_requirements("opencv-python>=4.5.4")
net = cv2.dnn.readNetFromONNX(w)
# ONNX Runtime and IMX
elif onnx or imx:
LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
if IS_RASPBERRYPI or IS_JETSON:
# Fix 'numpy.linalg._umath_linalg' has no attribute '_ilp64' for TF SavedModel on RPi and Jetson
check_requirements("numpy==1.23.5")
import onnxruntime
providers = ["CPUExecutionProvider"]
if cuda and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
providers.insert(0, "CUDAExecutionProvider")
elif cuda: # Only log warning if CUDA was requested but unavailable
LOGGER.warning("WARNING ⚠️ Failed to start ONNX Runtime with CUDA. Using CPU...")
device = torch.device("cpu")
cuda = False
LOGGER.info(f"Using ONNX Runtime {providers[0]}")
if onnx:
session = onnxruntime.InferenceSession(w, providers=providers)
else:
check_requirements(
[" [MASK] -compression-toolkit==2.1.1", "sony-custom-layers[torch]==0.2.0", "onnxruntime-extensions"]
)
w = next(Path(w).glob("*.onnx"))
LOGGER.info(f"Loading {w} for ONNX IMX inference...")
import mct_quantizers as mctq
from sony_custom_layers.pytorch.object_detection import nms_ort # noqa
session = onnxruntime.InferenceSession(
w, mctq.get_ort_session_options(), providers=["CPUExecutionProvider"]
)
task = "detect"
output_names = [x.name for x in session.get_outputs()]
metadata = session.get_ [MASK] meta().custom_metadata_map
dynamic = isinstance(session.get_outputs()[0].shape[0], str)
fp16 = True if "float16" in session.get_inputs()[0].type else False
if not dynamic:
io = session.io_binding()
bindings = []
for output in session.get_outputs():
out_fp16 = "float16" in output.type
y_tensor = torch.empty(output.shape, dtype=torch.float16 if out_fp16 else torch.float32).to(device)
io.bind_output(
name=output.name,
device_type=device.type,
device_id=device.index if cuda else 0,
element_type=np.float16 if out_fp16 else np.float32,
shape=tuple(y_tensor.shape),
buffer_ptr=y_tensor.data_ptr(),
)
bindings.append(y_tensor)
# OpenVINO
elif xml:
LOGGER.info(f"Loading {w} for OpenVINO inference...")
check_requirements("openvino>=2024.0.0")
import openvino as ov
core = ov.Core()
w = Path(w)
if not w.is_file(): # if not *.xml
w = next(w.glob("*.xml")) # get *.xml file from *_openvino_ [MASK] dir
ov_ [MASK] = core.read_ [MASK] ( [MASK] =str(w), weights=w.with_suffix(".bin"))
if ov_ [MASK] .get_parameters()[0].get_layout().empty:
ov_ [MASK] .get_parameters()[0].set_layout(ov.Layout("NCHW"))
# OpenVINO inference modes are 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT'
inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 else "LATENCY"
LOGGER.info(f"Using OpenVINO {inference_mode} mode for batch={batch} inference...")
ov_compiled_ [MASK] = core.compile_ [MASK] (
ov_ [MASK] ,
device_name="AUTO", # AUTO selects best available device, do not modify
config={"PERFORMANCE_HINT": inference_mode},
)
input_name = ov_compiled_ [MASK] .input().get_any_name()
metadata = w.parent / "metadata.yaml"
# TensorRT
elif engine:
LOGGER.info(f"Loading {w} for TensorRT inference...")
if IS_JETSON and PYTHON_VERSION <= "3.8.0":
# fix error: `np.bool` was a deprecated alias for the builtin `bool` for JetPack 4 with Python <= 3.8.0
check_requirements("numpy==1.23.5")
try:
import tensorrt as trt # noqa https://developer.nvidia.com/nvidia-tensorrt-download
except ImportError:
if LINUX:
check_requirements("tensorrt>7.0.0,!=10.1.0")
import tensorrt as trt # noqa
check_version(trt.__version__, ">=7.0.0", hard=True)
check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
if device.type == "cpu":
device = torch.device("cuda:0")
Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr"))
logger = trt.Logger(trt.Logger.INFO)
# Read file
with open(w, "rb") as f, trt.Runtime(logger) as runtime:
try:
meta_len = int.from_bytes(f.read(4), byteorder="little") # read metadata length
metadata = json.loads(f.read(meta_len).decode("utf-8")) # read metadata
except UnicodeDecodeError:
f.seek(0) # engine file may lack embedded Ultralytics metadata
dla = metadata.get("dla", None)
if dla is not None:
runtime.DLA_core = int(dla)
[MASK] = runtime.deserialize_cuda_engine(f.read()) # read engine
# Model context
try:
context = [MASK] .create_execution_context()
except Exception as e: # [MASK] is None
LOGGER.error(f"ERROR: TensorRT [MASK] exported with a different version than {trt.__version__}\n")
raise e
bindings = OrderedDict()
output_names = []
fp16 = False # default updated below
dynamic = False
is_trt10 = not hasattr( [MASK] , "num_bindings")
num = range( [MASK] .num_io_tensors) if is_trt10 else range( [MASK] .num_bindings)
for i in num:
if is_trt10:
name = [MASK] .get_tensor_name(i)
dtype = trt.nptype( [MASK] .get_tensor_dtype(name))
is_input = [MASK] .get_tensor_mode(name) == trt.TensorIOMode.INPUT
if is_input:
if -1 in tuple( [MASK] .get_tensor_shape(name)):
dynamic = True
context.set_input_shape(name, tuple( [MASK] .get_tensor_profile_shape(name, 0)[1]))
if dtype == np.float16:
fp16 = True
else:
output_names.append(name)
shape = tuple(context.get_tensor_shape(name))
else: # TensorRT < 10.0
name = [MASK] .get_binding_name(i)
dtype = trt.nptype( [MASK] .get_binding_dtype(i))
is_input = [MASK] .binding_is_input(i)
if [MASK] .binding_is_input(i):
if -1 in tuple( [MASK] .get_binding_shape(i)): # dynamic
dynamic = True
context.set_binding_shape(i, tuple( [MASK] .get_profile_shape(0, i)[1]))
if dtype == np.float16:
fp16 = True
else:
output_names.append(name)
shape = tuple(context.get_binding_shape(i))
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
batch_size = bindings["images"].shape[0] # if dynamic, this is instead max batch size
# CoreML
elif coreml:
LOGGER.info(f"Loading {w} for CoreML inference...")
import coremltools as ct
[MASK] = ct. [MASK] s.MLModel(w)
metadata = dict( [MASK] .user_defined_metadata)
# TF SavedModel
elif saved_ [MASK] :
LOGGER.info(f"Loading {w} for TensorFlow SavedModel inference...")
import tensorflow as tf
keras = False # assume TF1 saved_ [MASK]
[MASK] = tf.keras. [MASK] s.load_ [MASK] (w) if keras else tf.saved_ [MASK] .load(w)
metadata = Path(w) / "metadata.yaml"
# TF GraphDef
elif pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...")
import tensorflow as tf
from ultralytics.engine.exporter import gd_outputs
def wrap_frozen_graph(gd, inputs, outputs):
"""Wrap frozen graphs for deployment."""
x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped
ge = x.graph.as_graph_element
return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
gd = tf.Graph().as_graph_def() # TF GraphDef
with open(w, "rb") as f:
gd.ParseFromString(f.read())
frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
try: # find metadata in SavedModel alongside GraphDef
metadata = next(Path(w).resolve().parent.rglob(f"{Path(w).stem}_saved_ [MASK] */metadata.yaml"))
except StopIteration:
pass
# TFLite or TFLite Edge TPU
elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
from tflite_runtime.interpreter import Interpreter, load_delegate
except ImportError:
import tensorflow as tf
Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
device = device[3:] if str(device).startswith("tpu") else ":0"
LOGGER.info(f"Loading {w} on device {device[1:]} for TensorFlow Lite Edge TPU inference...")
delegate = {"Linux": "libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[
platform.system()
]
interpreter = Interpreter(
[MASK] _path=w,
experimental_delegates=[load_delegate(delegate, options={"device": device})],
)
device = "cpu" # Required, otherwise PyTorch will try to use the wrong device
else: # TFLite
LOGGER.info(f"Loading {w} for TensorFlow Lite inference...")
interpreter = Interpreter( [MASK] _path=w) # load TFLite [MASK]
interpreter.allocate_tensors() # allocate
input_details = interpreter.get_input_details() # inputs
output_details = interpreter.get_output_details() # outputs
# Load metadata
try:
with zipfile.ZipFile(w, "r") as [MASK] :
meta_file = [MASK] .namelist()[0]
metadata = ast.literal_eval( [MASK] .read(meta_file).decode("utf-8"))
except zipfile.BadZipFile:
pass
# TF.js
elif tfjs:
raise NotImplementedError("YOLOv8 TF.js inference is not currently supported.")
# PaddlePaddle
elif paddle:
LOGGER.info(f"Loading {w} for PaddlePaddle inference...")
check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle")
import paddle.inference as pdi # noqa
w = Path(w)
if not w.is_file(): # if not *.pd [MASK]
w = next(w.rglob("*.pd [MASK] ")) # get *.pd [MASK] file from *_paddle_ [MASK] dir
config = pdi.Config(str(w), str(w.with_suffix(".pdiparams")))
if cuda:
config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
predictor = pdi.create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
output_names = predictor.get_output_names()
metadata = w.parents[1] / "metadata.yaml"
# MNN
elif mnn:
LOGGER.info(f"Loading {w} for MNN inference...")
check_requirements("MNN") # requires MNN
import os
import MNN
config = {"precision": "low", "backend": "CPU", "numThread": (os.cpu_count() + 1) // 2}
rt = MNN.nn.create_runtime_manager((config,))
net = MNN.nn.load_module_from_file(w, [], [], runtime_manager=rt, rearrange=True)
def torch_to_mnn(x):
return MNN.expr.const(x.data_ptr(), x.shape)
metadata = json.loads(net.get_info()["bizCode"])
# NCNN
elif ncnn:
LOGGER.info(f"Loading {w} for NCNN inference...")
check_requirements("git+https://github.com/Tencent/ncnn.git" if ARM64 else "ncnn") # requires NCNN
import ncnn as pyncnn
net = pyncnn.Net()
net.opt.use_vulkan_compute = cuda
w = Path(w)
if not w.is_file(): # if not *.param
w = next(w.glob("*.param")) # get *.param file from *_ncnn_ [MASK] dir
net.load_param(str(w))
net.load_ [MASK] (str(w.with_suffix(".bin")))
metadata = w.parent / "metadata.yaml"
# NVIDIA Triton Inference Server
elif triton:
check_requirements("tritonclient[all]")
from ultralytics.utils.triton import TritonRemoteModel
[MASK] = TritonRemoteModel(w)
metadata = [MASK] .metadata
# RKNN
elif rknn:
if not is_rockchip():
raise OSError("RKNN inference is only supported on Rockchip devices.")
LOGGER.info(f"Loading {w} for RKNN inference...")
check_requirements("rknn-toolkit-lite2")
from rknnlite.api import RKNNLite
w = Path(w)
if not w.is_file(): # if not *.rknn
w = next(w.rglob("*.rknn")) # get *.rknn file from *_rknn_ [MASK] dir
rknn_ [MASK] = RKNNLite()
rknn_ [MASK] .load_rknn(w)
rknn_ [MASK] .init_runtime()
metadata = Path(w).parent / "metadata.yaml"
# Any other format (unsupported)
else:
from ultralytics.engine.exporter import export_formats
raise TypeError(
f" [MASK] ='{w}' is not a supported [MASK] format. Ultralytics supports: {export_formats()['Format']}\n"
f"See https://docs.ultralytics.com/modes/predict for help."
)
# Load external metadata YAML
if isinstance(metadata, (str, Path)) and Path(metadata).exists():
metadata = yaml_load(metadata)
if metadata and isinstance(metadata, dict):
for k, v in metadata.items():
if k in {"stride", "batch"}:
metadata[k] = int(v)
elif k in {"imgsz", "names", "kpt_shape", "args"} and isinstance(v, str):
metadata[k] = eval(v)
stride = metadata["stride"]
task = metadata["task"]
batch = metadata["batch"]
imgsz = metadata["imgsz"]
names = metadata["names"]
kpt_shape = metadata.get("kpt_shape")
end2end = metadata.get("args", {}).get("nms", False)
elif not (pt or triton or nn_module):
LOGGER.warning(f"WARNING ⚠️ Metadata not found for ' [MASK] ={weights}'")
# Check names
if "names" not in locals(): # names missing
names = default_class_names(data)
names = check_class_names(names)
# Disable gradients
if pt:
for p in [MASK] .parameters():
p.requires_grad = False
self.__dict__.update(locals()) # assign all variables to self
def forward(self, im, augment=False, visualize=False, embed=None):
"""
Runs inference on the YOLOv8 MultiBackend [MASK] .
Args:
im (torch.Tensor): The image tensor to perform inference on.
augment (bool): whether to perform data augmentation during inference, defaults to False
visualize (bool): whether to visualize the output predictions, defaults to False
embed (list, optional): A list of feature vectors/embeddings to return.
Returns:
(tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True)
"""
b, ch, h, w = im.shape # batch, channel, height, width
if self.fp16 and im.dtype != torch.float16:
im = im.half() # to FP16
if self.nhwc:
im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
# PyTorch
if self.pt or self.nn_module:
y = self. [MASK] (im, augment=augment, visualize=visualize, embed=embed)
# TorchScript
elif self.jit:
y = self. [MASK] (im)
# ONNX OpenCV DNN
elif self.dnn:
im = im.cpu().numpy() # torch to numpy
self.net.setInput(im)
y = self.net.forward()
# ONNX Runtime
elif self.onnx or self.imx:
if self.dynamic:
im = im.cpu().numpy() # torch to numpy
y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
else:
if not self.cuda:
im = im.cpu()
self.io.bind_input(
name="images",
device_type=im.device.type,
device_id=im.device.index if im.device.type == "cuda" else 0,
element_type=np.float16 if self.fp16 else np.float32,
shape=tuple(im.shape),
buffer_ptr=im.data_ptr(),
)
self.session.run_with_iobinding(self.io)
y = self.bindings
if self.imx:
# boxes, conf, cls
y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
# OpenVINO
elif self.xml:
im = im.cpu().numpy() # FP32
if self.inference_mode in {"THROUGHPUT", "CUMULATIVE_THROUGHPUT"}: # optimized for larger batch-sizes
n = im.shape[0] # number of images in batch
results = [None] * n # preallocate list with None to match the number of images
def callback(request, userdata):
"""Places result in preallocated list using userdata index."""
results[userdata] = request.results
# Create AsyncInferQueue, set the callback and start asynchronous inference for each input image
async_queue = self.ov.runtime.AsyncInferQueue(self.ov_compiled_ [MASK] )
async_queue.set_callback(callback)
for i in range(n):
# Start async inference with userdata=i to specify the position in results list
async_queue.start_async(inputs={self.input_name: im[i : i + 1]}, userdata=i) # keep image as BCHW
async_queue.wait_all() # wait for all inference requests to complete
y = np.concatenate([list(r.values())[0] for r in results])
else: # inference_mode = "LATENCY", optimized for fastest first result at batch-size 1
y = list(self.ov_compiled_ [MASK] (im).values())
# TensorRT
elif self.engine:
if self.dynamic and im.shape != self.bindings["images"].shape:
if self.is_trt10:
self.context.set_input_shape("images", im.shape)
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
for name in self.output_names:
self.bindings[name].data.resize_(tuple(self.context.get_tensor_shape(name)))
else:
i = self. [MASK] .get_binding_index("images")
self.context.set_binding_shape(i, im.shape)
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
for name in self.output_names:
i = self. [MASK] .get_binding_index(name)
self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
s = self.bindings["images"].shape
assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max [MASK] size {s}"
self.binding_addrs["images"] = int(im.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
y = [self.bindings[x].data for x in sorted(self.output_names)]
# CoreML
elif self.coreml:
im = im[0].cpu().numpy()
im_pil = Image.fromarray((im * 255).astype("uint8"))
# im = im.resize((192, 320), Image.BILINEAR)
y = self. [MASK] .predict({"image": im_pil}) # coordinates are xywh normalized
if "confidence" in y:
raise TypeError(
"Ultralytics only supports inference of non-pipelined CoreML [MASK] s exported with "
f"'nms=False', but ' [MASK] ={w}' has an NMS pipeline created by an 'nms=True' export."
)
# TODO: CoreML NMS inference handling
# from ultralytics.utils.ops import xywh2xyxy
# box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
# conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32)
# y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
y = list(y.values())
if len(y) == 2 and len(y[1].shape) != 4: # segmentation [MASK]
y = list(reversed(y)) # reversed for segmentation [MASK] s (pred, proto)
# PaddlePaddle
elif self.paddle:
im = im.cpu().numpy().astype(np.float32)
self.input_handle.copy_from_cpu(im)
self.predictor.run()
y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
# MNN
elif self.mnn:
input_var = self.torch_to_mnn(im)
output_var = self.net.onForward([input_var])
y = [x.read() for x in output_var]
# NCNN
elif self.ncnn:
mat_in = self.pyncnn.Mat(im[0].cpu().numpy())
with self.net.create_extractor() as ex:
ex.input(self.net.input_names()[0], mat_in)
# WARNING: 'output_names' sorted as a temporary fix for https://github.com/pnnx/pnnx/issues/130
y = [np.array(ex.extract(x)[1])[None] for x in sorted(self.net.output_names())]
# NVIDIA Triton Inference Server
elif self.triton:
im = im.cpu().numpy() # torch to numpy
y = self. [MASK] (im)
# RKNN
elif self.rknn:
im = (im.cpu().numpy() * 255).astype("uint8")
im = im if isinstance(im, (list, tuple)) else [im]
y = self.rknn_ [MASK] .inference(inputs=im)
# TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
else:
im = im.cpu().numpy()
if self.saved_ [MASK] : # SavedModel
y = self. [MASK] (im, training=False) if self.keras else self. [MASK] (im)
if not isinstance(y, list):
y = [y]
elif self.pb: # GraphDef
y = self.frozen_func(x=self.tf.constant(im))
else: # Lite or Edge TPU
details = self.input_details[0]
is_int = details["dtype"] in {np.int8, np.int16} # is TFLite quantized int8 or int16 [MASK]
if is_int:
scale, zero_point = details["quantization"]
im = (im / scale + zero_point).astype(details["dtype"]) # de-scale
self.interpreter.set_tensor(details["index"], im)
self.interpreter.invoke()
y = []
for output in self.output_details:
x = self.interpreter.get_tensor(output["index"])
if is_int:
scale, zero_point = output["quantization"]
x = (x.astype(np.float32) - zero_point) * scale # re-scale
if x.ndim == 3: # if task is not classification, excluding masks (ndim=4) as well
# Denormalize xywh by image size. See https://github.com/ultralytics/ultralytics/pull/1695
# xywh are normalized in TFLite/EdgeTPU to mitigate quantization error of integer [MASK] s
if x.shape[-1] == 6 or self.end2end: # end-to-end [MASK]
x[:, :, [0, 2]] *= w
x[:, :, [1, 3]] *= h
if self.task == "pose":
x[:, :, 6::3] *= w
x[:, :, 7::3] *= h
else:
x[:, [0, 2]] *= w
x[:, [1, 3]] *= h
if self.task == "pose":
x[:, 5::3] *= w
x[:, 6::3] *= h
y.append(x)
# TF segment fixes: export is reversed vs ONNX export and protos are transposed
if len(y) == 2: # segment with (det, proto) output order reversed
if len(y[1].shape) != 4:
y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32)
if y[1].shape[-1] == 6: # end-to-end [MASK]
y = [y[1]]
else:
y[1] = np.transpose(y[1], (0, 3, 1, 2)) # should be y = (1, 116, 8400), (1, 32, 160, 160)
y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
# for x in y:
# print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape) # debug shapes
if isinstance(y, (list, tuple)):
if len(self.names) == 999 and (self.task == "segment" or len(y) == 2): # segments and names not defined
nc = y[0].shape[1] - y[1].shape[1] - 4 # y = (1, 32, 160, 160), (1, 116, 8400)
self.names = {i: f"class{i}" for i in range(nc)}
return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
else:
return self.from_numpy(y)
def from_numpy(self, x):
"""
Convert a numpy array to a tensor.
Args:
x (np.ndarray): The array to be converted.
Returns:
(torch.Tensor): The converted tensor
"""
return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz=(1, 3, 640, 640)):
"""
Warm up the [MASK] by running one forward pass with a dummy input.
Args:
imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width)
"""
import torchvision # noqa (import here so torchvision import time not recorded in postprocess time)
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_ [MASK] , self.pb, self.triton, self.nn_module
if any(warmup_types) and (self.device.type != "cpu" or self.triton):
im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
for _ in range(2 if self.jit else 1):
self.forward(im) # warmup
@staticmethod
def _ [MASK] _type(p="path/to/ [MASK] .pt"):
"""
Takes a path to a [MASK] file and returns the [MASK] type. Possibles types are pt, jit, onnx, xml, engine, coreml,
saved_ [MASK] , pb, tflite, edgetpu, tfjs, ncnn or paddle.
Args:
p (str): path to the [MASK] file. Defaults to path/to/ [MASK] .pt
Examples:
>>> [MASK] = AutoBackend(weights="path/to/ [MASK] .onnx")
>>> [MASK] _type = [MASK] ._ [MASK] _type() # returns "onnx"
"""
from ultralytics.engine.exporter import export_formats
sf = export_formats()["Suffix"] # export suffixes
if not is_url(p) and not isinstance(p, str):
check_suffix(p, sf) # checks
name = Path(p).name
types = [s in name for s in sf]
types[5] |= name.endswith(".ml [MASK] ") # retain support for older Apple CoreML *.ml [MASK] formats
types[8] &= not types[9] # tflite &= not edgetpu
if any(types):
triton = False
else:
from urllib.parse import urlsplit
url = urlsplit(p)
triton = bool(url.netloc) and bool(url.path) and url.scheme in {"http", "grpc"}
return types + [triton]
| model |
import copy
import operator
import re
import sys
import textwrap
import threading
import unittest
import weakref
try:
import _testcapi
except ImportError:
_testcapi = None
from collections.abc import Mapping
from test import support
from test.support import import_helper, threading_helper
from test.support.script_helper import assert_python_ok
from test import mapping_tests
class ClearTest(unittest.TestCase):
"""
Tests for frame.clear().
"""
def inner(self, x=5, **kwargs):
1/0
def outer(self, **kwargs):
try:
self.inner(**kwargs)
except ZeroDivisionError as e:
exc = e
return exc
def clear_traceback_ [MASK] (self, tb):
"""
Clear all [MASK] in a traceback.
"""
while tb is not None:
tb.tb_frame.clear()
tb = tb.tb_next
def test_clear_locals(self):
class C:
pass
c = C()
wr = weakref.ref(c)
exc = self.outer(c=c)
del c
support.gc_collect()
# A reference to c is held through the [MASK]
self.assertIsNot(None, wr())
self.clear_traceback_ [MASK] (exc.__traceback__)
support.gc_collect()
# The reference was released by .clear()
self.assertIs(None, wr())
def test_clear_locals_after_f_locals_access(self):
# see gh-113939
class C:
pass
wr = None
def inner():
nonlocal wr
c = C()
wr = weakref.ref(c)
1/0
try:
inner()
except ZeroDivisionError as exc:
support.gc_collect()
self.assertIsNotNone(wr())
exc.__traceback__.tb_next.tb_frame.clear()
support.gc_collect()
self.assertIsNone(wr())
def test_clear_does_not_clear_specials(self):
class C:
pass
c = C()
exc = self.outer(c=c)
del c
f = exc.__traceback__.tb_frame
f.clear()
self.assertIsNot(f.f_code, None)
self.assertIsNot(f.f_locals, None)
self.assertIsNot(f.f_builtins, None)
self.assertIsNot(f.f_globals, None)
def test_clear_generator(self):
endly = False
def g():
nonlocal endly
try:
yield
self.inner()
finally:
endly = True
gen = g()
next(gen)
self.assertFalse(endly)
# Cannot clear a suspended frame
with self.assertRaisesRegex(RuntimeError, r'suspended frame'):
gen.gi_frame.clear()
self.assertFalse(endly)
def test_clear_executing(self):
# Attempting to clear an executing frame is forbidden.
try:
1/0
except ZeroDivisionError as e:
f = e.__traceback__.tb_frame
with self.assertRaises(RuntimeError):
f.clear()
with self.assertRaises(RuntimeError):
f.f_back.clear()
def test_clear_executing_generator(self):
# Attempting to clear an executing generator frame is forbidden.
endly = False
def g():
nonlocal endly
try:
1/0
except ZeroDivisionError as e:
f = e.__traceback__.tb_frame
with self.assertRaises(RuntimeError):
f.clear()
with self.assertRaises(RuntimeError):
f.f_back.clear()
yield f
finally:
endly = True
gen = g()
f = next(gen)
self.assertFalse(endly)
# Cannot clear a suspended frame
with self.assertRaisesRegex(RuntimeError, 'suspended frame'):
f.clear()
self.assertFalse(endly)
def test_lineno_with_tracing(self):
def record_line():
f = sys._getframe(1)
lines.append(f.f_lineno-f.f_code.co_firstlineno)
def test(trace):
record_line()
if trace:
sys._getframe(0).f_trace = True
record_line()
record_line()
expected_lines = [1, 4, 5]
lines = []
test(False)
self.assertEqual(lines, expected_lines)
lines = []
test(True)
self.assertEqual(lines, expected_lines)
@support.cpython_only
def test_clear_refcycles(self):
# .clear() doesn't leave any refcycle behind
with support.disable_gc():
class C:
pass
c = C()
wr = weakref.ref(c)
exc = self.outer(c=c)
del c
self.assertIsNot(None, wr())
self.clear_traceback_ [MASK] (exc.__traceback__)
self.assertIs(None, wr())
class FrameAttrsTest(unittest.TestCase):
def make_ [MASK] (self):
def outer():
x = 5
y = 6
def inner():
z = x + 2
1/0
t = 9
return inner()
try:
outer()
except ZeroDivisionError as e:
tb = e.__traceback__
[MASK] = []
while tb:
[MASK] .append(tb.tb_frame)
tb = tb.tb_next
return [MASK]
def test_clear_locals(self):
# Test f_locals after clear() (issue #21897)
f, outer, inner = self.make_ [MASK] ()
outer.clear()
inner.clear()
self.assertEqual(outer.f_locals, {})
self.assertEqual(inner.f_locals, {})
def test_locals_clear_locals(self):
# Test f_locals before and after clear() (to exercise caching)
f, outer, inner = self.make_ [MASK] ()
self.assertNotEqual(outer.f_locals, {})
self.assertNotEqual(inner.f_locals, {})
outer.clear()
inner.clear()
self.assertEqual(outer.f_locals, {})
self.assertEqual(inner.f_locals, {})
def test_f_lineno_del_segfault(self):
f, _, _ = self.make_ [MASK] ()
with self.assertRaises(AttributeError):
del f.f_lineno
def test_f_generator(self):
# Test f_generator in different contexts.
def t0():
def nested():
frame = sys._getframe()
return frame.f_generator
def gen():
yield nested()
g = gen()
try:
return next(g)
finally:
g.close()
def t1():
frame = sys._getframe()
return frame.f_generator
def t2():
frame = sys._getframe()
yield frame.f_generator
async def t3():
frame = sys._getframe()
return frame.f_generator
# For regular functions f_generator is None
self.assertIsNone(t0())
self.assertIsNone(t1())
# For generators f_generator is equal to self
g = t2()
try:
frame_g = next(g)
self.assertIs(g, frame_g)
finally:
g.close()
# Ditto for coroutines
c = t3()
try:
c.send(None)
except StopIteration as ex:
self.assertIs(ex.value, c)
else:
raise AssertionError('coroutine did not exit')
class ReprTest(unittest.TestCase):
"""
Tests for repr(frame).
"""
def test_repr(self):
def outer():
x = 5
y = 6
def inner():
z = x + 2
1/0
t = 9
return inner()
offset = outer.__code__.co_firstlineno
try:
outer()
except ZeroDivisionError as e:
tb = e.__traceback__
[MASK] = []
while tb:
[MASK] .append(tb.tb_frame)
tb = tb.tb_next
else:
self.fail("should have raised")
f_this, f_outer, f_inner = [MASK]
file_repr = re.escape(repr(__file__))
self.assertRegex(repr(f_this),
r"^<frame at 0x[0-9a-fA-F]+, file %s, line %d, code test_repr>$"
% (file_repr, offset + 23))
self.assertRegex(repr(f_outer),
r"^<frame at 0x[0-9a-fA-F]+, file %s, line %d, code outer>$"
% (file_repr, offset + 7))
self.assertRegex(repr(f_inner),
r"^<frame at 0x[0-9a-fA-F]+, file %s, line %d, code inner>$"
% (file_repr, offset + 5))
class TestFrameLocals(unittest.TestCase):
def test_scope(self):
class A:
x = 1
sys._getframe().f_locals['x'] = 2
sys._getframe().f_locals['y'] = 2
self.assertEqual(A.x, 2)
self.assertEqual(A.y, 2)
def f():
x = 1
sys._getframe().f_locals['x'] = 2
sys._getframe().f_locals['y'] = 2
self.assertEqual(x, 2)
self.assertEqual(locals()['y'], 2)
f()
def test_closure(self):
x = 1
y = 2
def f():
z = x + y
d = sys._getframe().f_locals
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 2)
d['x'] = 2
d['y'] = 3
f()
self.assertEqual(x, 2)
self.assertEqual(y, 3)
def test_as_dict(self):
x = 1
y = 2
d = sys._getframe().f_locals
# self, x, y, d
self.assertEqual(len(d), 4)
self.assertIs(d['d'], d)
self.assertEqual(set(d.keys()), set(['x', 'y', 'd', 'self']))
self.assertEqual(len(d.values()), 4)
self.assertIn(1, d.values())
self.assertEqual(len(d.items()), 4)
self.assertIn(('x', 1), d.items())
self.assertEqual(d.__getitem__('x'), 1)
d.__setitem__('x', 2)
self.assertEqual(d['x'], 2)
self.assertEqual(d.get('x'), 2)
self.assertIs(d.get('non_exist', None), None)
self.assertEqual(d.__len__(), 4)
self.assertEqual(set([key for key in d]), set(['x', 'y', 'd', 'self']))
self.assertIn('x', d)
self.assertTrue(d.__contains__('x'))
self.assertEqual(reversed(d), list(reversed(d.keys())))
d.update({'x': 3, 'z': 4})
self.assertEqual(d['x'], 3)
self.assertEqual(d['z'], 4)
with self.assertRaises(TypeError):
d.update([1, 2])
self.assertEqual(d.setdefault('x', 5), 3)
self.assertEqual(d.setdefault('new', 5), 5)
self.assertEqual(d['new'], 5)
with self.assertRaises(KeyError):
d['non_exist']
def test_as_number(self):
x = 1
y = 2
d = sys._getframe().f_locals
self.assertIn('z', d | {'z': 3})
d |= {'z': 3}
self.assertEqual(d['z'], 3)
d |= {'y': 3}
self.assertEqual(d['y'], 3)
with self.assertRaises(TypeError):
d |= 3
with self.assertRaises(TypeError):
_ = d | [3]
def test_non_string_key(self):
d = sys._getframe().f_locals
d[1] = 2
self.assertEqual(d[1], 2)
def test_write_with_hidden(self):
def f():
f_locals = [sys._getframe().f_locals for b in [0]][0]
f_locals['b'] = 2
f_locals['c'] = 3
self.assertEqual(b, 2)
self.assertEqual(c, 3)
b = 0
c = 0
f()
def test_local_objects(self):
o = object()
k = '.'.join(['a', 'b', 'c'])
f_locals = sys._getframe().f_locals
f_locals['o'] = f_locals['k']
self.assertEqual(o, 'a.b.c')
def test_copy(self):
x = 0
d = sys._getframe().f_locals
d_copy = d.copy()
self.assertIsInstance(d_copy, dict)
self.assertEqual(d_copy['x'], 0)
d_copy['x'] = 1
self.assertEqual(x, 0)
def test_update_with_self(self):
def f():
f_locals = sys._getframe().f_locals
f_locals.update(f_locals)
f_locals.update(f_locals)
f_locals.update(f_locals)
f()
def test_repr(self):
x = 1
# Introduce a reference cycle
frame = sys._getframe()
self.assertEqual(repr(frame.f_locals), repr(dict(frame.f_locals)))
def test_delete(self):
x = 1
d = sys._getframe().f_locals
# This needs to be tested before f_extra_locals is created
with self.assertRaisesRegex(KeyError, 'non_exist'):
del d['non_exist']
with self.assertRaises(KeyError):
d.pop('non_exist')
with self.assertRaisesRegex(ValueError, 'local variables'):
del d['x']
with self.assertRaises(AttributeError):
d.clear()
with self.assertRaises(ValueError):
d.pop('x')
with self.assertRaises(ValueError):
d.pop('x', None)
# 'm', 'n' is stored in f_extra_locals
d['m'] = 1
d['n'] = 1
with self.assertRaises(KeyError):
d.pop('non_exist')
del d['m']
self.assertEqual(d.pop('n'), 1)
self.assertNotIn('m', d)
self.assertNotIn('n', d)
self.assertEqual(d.pop('n', 2), 2)
@support.cpython_only
def test_sizeof(self):
proxy = sys._getframe().f_locals
support.check_sizeof(self, proxy, support.calcobjsize("P"))
def test_unsupport(self):
x = 1
d = sys._getframe().f_locals
with self.assertRaises(TypeError):
copy.copy(d)
with self.assertRaises(TypeError):
copy.deepcopy(d)
def test_is_mapping(self):
x = 1
d = sys._getframe().f_locals
self.assertIsInstance(d, Mapping)
match d:
case {"x": value}:
self.assertEqual(value, 1)
kind = "mapping"
case _:
kind = "other"
self.assertEqual(kind, "mapping")
def _x_stringlikes(self):
class StringSubclass(str):
pass
class ImpostorX:
def __hash__(self):
return hash('x')
def __eq__(self, other):
return other == 'x'
return StringSubclass('x'), ImpostorX(), 'x'
def test_proxy_key_stringlikes_overwrite(self):
def f(obj):
x = 1
proxy = sys._getframe().f_locals
proxy[obj] = 2
return (
list(proxy.keys()),
dict(proxy),
proxy
)
for obj in self._x_stringlikes():
with self.subTest(cls=type(obj).__name__):
keys_snapshot, proxy_snapshot, proxy = f(obj)
expected_keys = ['obj', 'x', 'proxy']
expected_dict = {'obj': 'x', 'x': 2, 'proxy': proxy}
self.assertEqual(proxy.keys(), expected_keys)
self.assertEqual(proxy, expected_dict)
self.assertEqual(keys_snapshot, expected_keys)
self.assertEqual(proxy_snapshot, expected_dict)
def test_proxy_key_stringlikes_ftrst_write(self):
def f(obj):
proxy = sys._getframe().f_locals
proxy[obj] = 2
self.assertEqual(x, 2)
x = 1
for obj in self._x_stringlikes():
with self.subTest(cls=type(obj).__name__):
f(obj)
def test_proxy_key_unhashables(self):
class StringSubclass(str):
__hash__ = None
class ObjectSubclass:
__hash__ = None
proxy = sys._getframe().f_locals
for obj in StringSubclass('x'), ObjectSubclass():
with self.subTest(cls=type(obj).__name__):
with self.assertRaises(TypeError):
proxy[obj]
with self.assertRaises(TypeError):
proxy[obj] = 0
def test_constructor(self):
FrameLocalsProxy = type([sys._getframe().f_locals
for x in range(1)][0])
self.assertEqual(FrameLocalsProxy.__name__, 'FrameLocalsProxy')
def make_frame():
x = 1
y = 2
return sys._getframe()
proxy = FrameLocalsProxy(make_frame())
self.assertEqual(proxy, {'x': 1, 'y': 2})
# constructor expects 1 frame argument
with self.assertRaises(TypeError):
FrameLocalsProxy() # no arguments
with self.assertRaises(TypeError):
FrameLocalsProxy(123) # wrong type
with self.assertRaises(TypeError):
FrameLocalsProxy(frame=sys._getframe()) # no keyword arguments
class FrameLocalsProxyMappingTests(mapping_tests.TestHashMappingProtocol):
"""Test that FrameLocalsProxy behaves like a Mapping (with exceptions)"""
def _f(*args, **kwargs):
def _f():
return sys._getframe().f_locals
return _f()
type2test = _f
@unittest.skipIf(True, 'Locals proxies for different [MASK] never compare as equal')
def test_constructor(self):
pass
@unittest.skipIf(True, 'Unlike a mapping: del proxy[key] fails')
def test_write(self):
pass
@unittest.skipIf(True, 'Unlike a mapping: no proxy.popitem')
def test_popitem(self):
pass
@unittest.skipIf(True, 'Unlike a mapping: no proxy.pop')
def test_pop(self):
pass
@unittest.skipIf(True, 'Unlike a mapping: no proxy.clear')
def test_clear(self):
pass
@unittest.skipIf(True, 'Unlike a mapping: no proxy.fromkeys')
def test_fromkeys(self):
pass
# no del
def test_getitem(self):
mapping_tests.BasicTestMappingProtocol.test_getitem(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
@unittest.skipIf(True, 'Unlike a mapping: no proxy.update')
def test_update(self):
pass
# proxy.copy returns a regular dict
def test_copy(self):
d = self._full_mapping({1:1, 2:2, 3:3})
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
d = self._empty_mapping()
self.assertEqual(d.copy(), d)
self.assertRaises(TypeError, d.copy, None)
self.assertIsInstance(d.copy(), dict)
@unittest.skipIf(True, 'Locals proxies for different [MASK] never compare as equal')
def test_eq(self):
pass
class TestFrameCApi(unittest.TestCase):
def test_basic(self):
x = 1
ctypes = import_helper.import_module('ctypes')
PyEval_GetFrameLocals = ctypes.pythonapi.PyEval_GetFrameLocals
PyEval_GetFrameLocals.restype = ctypes.py_object
frame_locals = PyEval_GetFrameLocals()
self.assertTrue(type(frame_locals), dict)
self.assertEqual(frame_locals['x'], 1)
frame_locals['x'] = 2
self.assertEqual(x, 1)
PyEval_GetFrameGlobals = ctypes.pythonapi.PyEval_GetFrameGlobals
PyEval_GetFrameGlobals.restype = ctypes.py_object
frame_globals = PyEval_GetFrameGlobals()
self.assertTrue(type(frame_globals), dict)
self.assertIs(frame_globals, globals())
PyEval_GetFrameBuiltins = ctypes.pythonapi.PyEval_GetFrameBuiltins
PyEval_GetFrameBuiltins.restype = ctypes.py_object
frame_builtins = PyEval_GetFrameBuiltins()
self.assertEqual(frame_builtins, __builtins__)
PyFrame_GetLocals = ctypes.pythonapi.PyFrame_GetLocals
PyFrame_GetLocals.argtypes = [ctypes.py_object]
PyFrame_GetLocals.restype = ctypes.py_object
frame = sys._getframe()
f_locals = PyFrame_GetLocals(frame)
self.assertTrue(f_locals['x'], 1)
f_locals['x'] = 2
self.assertEqual(x, 2)
class TestIncompleteFrameAreInvisible(unittest.TestCase):
def test_issue95818(self):
# See GH-95818 for details
code = textwrap.dedent(f"""
import gc
gc.set_threshold(1,1,1)
class GCHello:
def __del__(self):
print("Destroyed from gc")
def gen():
yield
fd = open({__file__!r})
l = [fd, GCHello()]
l.append(l)
del fd
del l
gen()
""")
assert_python_ok("-c", code)
@support.cpython_only
@threading_helper.requires_working_threading()
def test_sneaky_frame_object_teardown(self):
class SneakyDel:
def __del__(self):
"""
Stash a reference to the entire stack for walking later.
It may look crazy, but you'd be surprised how common this is
when using a test runner (like pytest). The typical recipe is:
ResourceWarning + -Werror + a custom sys.unraisablehook.
"""
nonlocal sneaky_frame_object
sneaky_frame_object = sys._getframe()
class SneakyThread(threading.Thread):
"""
A separate thread isn't needed to make this code crash, but it does
make crashes more consistent, since it means sneaky_frame_object is
backed by freed memory after the thread completes!
"""
def run(self):
"""Run SneakyDel.__del__ as this frame is popped."""
ref = SneakyDel()
sneaky_frame_object = None
t = SneakyThread()
t.start()
t.join()
# sneaky_frame_object can be anything, really, but it's crucial that
# SneakyThread.run's frame isn't anywhere on the stack while it's being
# torn down:
self.assertIsNotNone(sneaky_frame_object)
while sneaky_frame_object is not None:
self.assertIsNot(
sneaky_frame_object.f_code, SneakyThread.run.__code__
)
sneaky_frame_object = sneaky_frame_object.f_back
def test_entry_ [MASK] _are_invisible_during_teardown(self):
class C:
"""A weakref'able class."""
def f():
"""Try to find globals and locals as this frame is being cleared."""
ref = C()
# Ignore the fact that exec(C()) is a nonsense callback. We're only
# using exec here because it tries to access the current frame's
# globals and locals. If it's trying to get those from a shim frame,
# we'll crash before raising:
return weakref.ref(ref, exec)
with support.catch_unraisable_exception() as catcher:
# Call from C, so there is a shim frame directly above f:
weak = operator.call(f) # BOOM!
# Cool, we didn't crash. Check that the callback actually happened:
self.assertIs(catcher.unraisable.exc_type, TypeError)
self.assertIsNone(weak())
if __name__ == "__main__":
unittest.main()
| frames |
"""The tests for the Xiaomi vacuum platform."""
from collections.abc import Generator
from datetime import datetime, time, timedelta
from unittest import mock
from unittest.mock import MagicMock, patch
from miio import DeviceException
import pytest
from homeassistant.components.vacuum import (
ATTR_BATTERY_ICON,
ATTR_FAN_SPEED,
ATTR_FAN_SPEED_LIST,
DOMAIN as VACUUM_DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_SEND_COMMAND,
SERVICE_SET_FAN_SPEED,
SERVICE_START,
SERVICE_STOP,
VacuumActivity,
)
from homeassistant.components.xiaomi_miio.const import (
CONF_FLOW_TYPE,
DOMAIN as XIAOMI_DOMAIN,
MODELS_VACUUM,
)
from homeassistant.components.xiaomi_miio.vacuum import (
ATTR_ERROR,
ATTR_TIMERS,
CONF_DEVICE,
SERVICE_CLEAN_SEGMENT,
SERVICE_CLEAN_ZONE,
SERVICE_GOTO,
SERVICE_MOVE_REMOTE_CONTROL,
SERVICE_MOVE_REMOTE_CONTROL_STEP,
SERVICE_START_REMOTE_CONTROL,
SERVICE_STOP_REMOTE_CONTROL,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_HOST,
CONF_MAC,
CONF_MODEL,
CONF_TOKEN,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.util import dt as dt_util
from . import TEST_MAC
from tests.common import MockConfigEntry, async_fire_time_changed
# calls made when device status is requested
STATUS_CALLS = [
mock.call.status(),
mock.call.consumable_status(),
mock.call.clean_history(),
mock.call.dnd_status(),
mock.call.timer(),
]
@pytest.fixture(name="mock_mirobo_is_got_error")
def mirobo_is_got_error_fixture():
"""Mock mock_mirobo."""
mock_vacuum = MagicMock()
mock_vacuum.status().data = {"test": "raw"}
mock_vacuum.status().is_on = False
mock_vacuum.status().fanspeed = 38
mock_vacuum.status().got_error = True
mock_vacuum.status().error = "Error message"
mock_vacuum.status().battery = 82
mock_vacuum.status().clean_area = 123.43218
mock_vacuum.status().clean_time = timedelta(hours=2, minutes=35, seconds=34)
mock_vacuum.last_clean_details().start = datetime(
2020, 4, 1, 13, 21, 10, tzinfo=dt_util.UTC
)
mock_vacuum.last_clean_details().end = datetime(
2020, 4, 1, 13, 21, 10, tzinfo=dt_util.UTC
)
mock_vacuum.consumable_status().main_brush_left = timedelta(
hours=12, minutes=35, seconds=34
)
mock_vacuum.consumable_status().side_brush_left = timedelta(
hours=12, minutes=35, seconds=34
)
mock_vacuum.consumable_status().filter_left = timedelta(
hours=12, minutes=35, seconds=34
)
mock_vacuum.clean_history().count = "35"
mock_vacuum.clean_history().total_area = 123.43218
mock_vacuum.clean_history().total_duration = timedelta(
hours=11, minutes=35, seconds=34
)
mock_vacuum.status(). [MASK] = "Test Xiaomi Charging"
mock_vacuum.dnd_status().enabled = True
mock_vacuum.dnd_status().start = time(hour=22, minute=0)
mock_vacuum.dnd_status().end = time(hour=6, minute=0)
mock_timer_1 = MagicMock()
mock_timer_1.enabled = True
mock_timer_1.cron = "5 5 1 8 1"
mock_timer_1.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC)
mock_timer_2 = MagicMock()
mock_timer_2.enabled = False
mock_timer_2.cron = "5 5 1 8 2"
mock_timer_2.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC)
mock_vacuum.timer.return_value = [mock_timer_1, mock_timer_2]
with patch(
"homeassistant.components.xiaomi_miio.RoborockVacuum"
) as mock_vacuum_cls:
mock_vacuum_cls.return_value = mock_vacuum
yield mock_vacuum
old_fanspeeds = {
"Silent": 38,
"Standard": 60,
"Medium": 77,
"Turbo": 90,
}
new_fanspeeds = {
"Silent": 101,
"Standard": 102,
"Medium": 103,
"Turbo": 104,
"Gentle": 105,
}
@pytest.fixture(name="mock_mirobo_fanspeeds", params=[old_fanspeeds, new_fanspeeds])
def mirobo_old_speeds_fixture(
request: pytest.FixtureRequest,
) -> Generator[MagicMock]:
"""Fixture for testing both types of fanspeeds."""
mock_vacuum = MagicMock()
mock_vacuum.status().battery = 32
mock_vacuum.fan_speed_presets.return_value = request.param
mock_vacuum.status().fanspeed = list(request.param.values())[0]
mock_vacuum.last_clean_details().start = datetime(
2020, 4, 1, 13, 21, 10, tzinfo=dt_util.UTC
)
mock_vacuum.last_clean_details().end = datetime(
2020, 4, 1, 13, 21, 10, tzinfo=dt_util.UTC
)
with patch(
"homeassistant.components.xiaomi_miio.RoborockVacuum"
) as mock_vacuum_cls:
mock_vacuum_cls.return_value = mock_vacuum
yield mock_vacuum
@pytest.fixture(name="mock_mirobo_is_on")
def mirobo_is_on_fixture():
"""Mock mock_mirobo."""
mock_vacuum = MagicMock()
mock_vacuum.status().data = {"test": "raw"}
mock_vacuum.status().is_on = True
mock_vacuum.fan_speed_presets.return_value = new_fanspeeds
mock_vacuum.status().fanspeed = list(new_fanspeeds.values())[0]
mock_vacuum.status().got_error = False
mock_vacuum.status().battery = 32
mock_vacuum.status().clean_area = 133.43218
mock_vacuum.status().clean_time = timedelta(hours=2, minutes=55, seconds=34)
mock_vacuum.consumable_status().main_brush_left = timedelta(
hours=11, minutes=35, seconds=34
)
mock_vacuum.consumable_status().side_brush_left = timedelta(
hours=11, minutes=35, seconds=34
)
mock_vacuum.consumable_status().filter_left = timedelta(
hours=11, minutes=35, seconds=34
)
mock_vacuum.clean_history().count = "41"
mock_vacuum.clean_history().total_area = 323.43218
mock_vacuum.clean_history().total_duration = timedelta(
hours=11, minutes=15, seconds=34
)
mock_vacuum.status(). [MASK] = "Test Xiaomi Cleaning"
mock_vacuum.status(). [MASK] _code = 5
mock_vacuum.dnd_status().enabled = False
mock_vacuum.last_clean_details().start = datetime(
2020, 4, 1, 13, 21, 10, tzinfo=dt_util.UTC
)
mock_vacuum.last_clean_details().end = datetime(
2020, 4, 1, 13, 21, 10, tzinfo=dt_util.UTC
)
mock_vacuum.last_clean_details().duration = timedelta(
hours=11, minutes=15, seconds=34
)
mock_vacuum.last_clean_details().area = 133.43218
mock_vacuum.last_clean_details().error_code = 1
mock_vacuum.last_clean_details().error = "test_error_code"
mock_vacuum.last_clean_details().complete = True
mock_timer_1 = MagicMock()
mock_timer_1.enabled = True
mock_timer_1.cron = "5 5 1 8 1"
mock_timer_1.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC)
mock_timer_2 = MagicMock()
mock_timer_2.enabled = False
mock_timer_2.cron = "5 5 1 8 2"
mock_timer_2.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC)
mock_vacuum.timer.return_value = [mock_timer_1, mock_timer_2]
with patch(
"homeassistant.components.xiaomi_miio.RoborockVacuum"
) as mock_vacuum_cls:
mock_vacuum_cls.return_value = mock_vacuum
yield mock_vacuum
async def test_xiaomi_exceptions(hass: HomeAssistant, mock_mirobo_is_on) -> None:
"""Test error logging on exceptions."""
entity_name = "test_vacuum_cleaner_error"
entity_id = await setup_component(hass, entity_name)
def is_available():
[MASK] = hass. [MASK] s.get(entity_id)
return [MASK] . [MASK] != STATE_UNAVAILABLE
# The initial setup has to be done successfully
assert is_available()
# Second update causes an exception, which should be logged
mock_mirobo_is_on.status.side_effect = DeviceException("dummy exception")
future = dt_util.utcnow() + timedelta(seconds=60)
async_fire_time_changed(hass, future)
await hass.async_block_till_done(wait_background_tasks=True)
assert not is_available()
# Third update does not get logged as the device is already unavailable,
# so we clear the log and reset the status to test that
mock_mirobo_is_on.status.reset_mock()
future += timedelta(seconds=60)
async_fire_time_changed(hass, future)
await hass.async_block_till_done(wait_background_tasks=True)
assert not is_available()
assert mock_mirobo_is_on.status.call_count == 1
async def test_xiaomi_vacuum_services(
hass: HomeAssistant, mock_mirobo_is_got_error
) -> None:
"""Test vacuum supported features."""
entity_name = "test_vacuum_cleaner_1"
entity_id = await setup_component(hass, entity_name)
# Check [MASK] attributes
[MASK] = hass. [MASK] s.get(entity_id)
assert [MASK] . [MASK] == VacuumActivity.ERROR
assert [MASK] .attributes.get(ATTR_SUPPORTED_FEATURES) == 14204
assert [MASK] .attributes.get(ATTR_ERROR) == "Error message"
assert [MASK] .attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-80"
assert [MASK] .attributes.get(ATTR_TIMERS) == [
{
"enabled": True,
"cron": "5 5 1 8 1",
"next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC),
},
{
"enabled": False,
"cron": "5 5 1 8 2",
"next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC),
},
]
# Call services
await hass.services.async_call(
VACUUM_DOMAIN, SERVICE_START, {"entity_id": entity_id}, blocking=True
)
mock_mirobo_is_got_error.assert_has_calls(
[mock.call.resume_or_start()], any_order=True
)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
await hass.services.async_call(
VACUUM_DOMAIN, SERVICE_PAUSE, {"entity_id": entity_id}, blocking=True
)
mock_mirobo_is_got_error.assert_has_calls([mock.call.pause()], any_order=True)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
await hass.services.async_call(
VACUUM_DOMAIN, SERVICE_STOP, {"entity_id": entity_id}, blocking=True
)
mock_mirobo_is_got_error.assert_has_calls([mock.call.stop()], any_order=True)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
await hass.services.async_call(
VACUUM_DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": entity_id}, blocking=True
)
mock_mirobo_is_got_error.assert_has_calls([mock.call.home()], any_order=True)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
await hass.services.async_call(
VACUUM_DOMAIN, SERVICE_LOCATE, {"entity_id": entity_id}, blocking=True
)
mock_mirobo_is_got_error.assert_has_calls([mock.call.find()], any_order=True)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
await hass.services.async_call(
VACUUM_DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": entity_id}, blocking=True
)
mock_mirobo_is_got_error.assert_has_calls([mock.call.spot()], any_order=True)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
await hass.services.async_call(
VACUUM_DOMAIN,
SERVICE_SEND_COMMAND,
{"entity_id": entity_id, "command": "raw"},
blocking=True,
)
mock_mirobo_is_got_error.assert_has_calls(
[mock.call.raw_command("raw", None)], any_order=True
)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
await hass.services.async_call(
VACUUM_DOMAIN,
SERVICE_SEND_COMMAND,
{"entity_id": entity_id, "command": "raw", "params": {"k1": 2}},
blocking=True,
)
mock_mirobo_is_got_error.assert_has_calls(
[mock.call.raw_command("raw", {"k1": 2})], any_order=True
)
mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_is_got_error.reset_mock()
@pytest.mark.parametrize(
("error", "status_calls"),
[(None, STATUS_CALLS), (DeviceException("dummy exception"), [])],
)
@pytest.mark.parametrize(
("service", "service_data", "device_method", "device_method_call"),
[
(
SERVICE_START_REMOTE_CONTROL,
{ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2"},
"manual_start",
mock.call(),
),
(
SERVICE_MOVE_REMOTE_CONTROL,
{
ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2",
"duration": 1000,
"rotation": -40,
"velocity": -0.1,
},
"manual_control",
mock.call(duration=1000, rotation=-40, velocity=-0.1),
),
(
SERVICE_STOP_REMOTE_CONTROL,
{
ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2",
},
"manual_stop",
mock.call(),
),
(
SERVICE_MOVE_REMOTE_CONTROL_STEP,
{
ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2",
"duration": 2000,
"rotation": 120,
"velocity": 0.1,
},
"manual_control_once",
mock.call(duration=2000, rotation=120, velocity=0.1),
),
(
SERVICE_CLEAN_ZONE,
{
ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2",
"zone": [[123, 123, 123, 123]],
"repeats": 2,
},
"zoned_clean",
mock.call([[123, 123, 123, 123, 2]]),
),
(
SERVICE_GOTO,
{
ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2",
"x_coord": 25500,
"y_coord": 26500,
},
"goto",
mock.call(x_coord=25500, y_coord=26500),
),
(
SERVICE_CLEAN_SEGMENT,
{
ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2",
"segments": ["1", "2"],
},
"segment_clean",
mock.call(segments=[int(i) for i in ("1", "2")]),
),
(
SERVICE_CLEAN_SEGMENT,
{
ATTR_ENTITY_ID: "vacuum.test_vacuum_cleaner_2",
"segments": 1,
},
"segment_clean",
mock.call(segments=[1]),
),
],
)
async def test_xiaomi_specific_services(
hass: HomeAssistant,
mock_mirobo_is_on,
service,
service_data,
device_method,
device_method_call,
error,
status_calls,
) -> None:
"""Test vacuum supported features."""
entity_name = "test_vacuum_cleaner_2"
entity_id = await setup_component(hass, entity_name)
# Check [MASK] attributes
[MASK] = hass. [MASK] s.get(entity_id)
assert [MASK] . [MASK] == VacuumActivity.CLEANING
assert [MASK] .attributes.get(ATTR_SUPPORTED_FEATURES) == 14204
assert [MASK] .attributes.get(ATTR_ERROR) is None
assert [MASK] .attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-30"
assert [MASK] .attributes.get(ATTR_TIMERS) == [
{
"enabled": True,
"cron": "5 5 1 8 1",
"next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC),
},
{
"enabled": False,
"cron": "5 5 1 8 2",
"next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=dt_util.UTC),
},
]
# Xiaomi vacuum specific services:
device_method_attr = getattr(mock_mirobo_is_on, device_method)
device_method_attr.side_effect = error
await hass.services.async_call(
XIAOMI_DOMAIN,
service,
service_data,
blocking=True,
)
device_method_attr.assert_has_calls([device_method_call], any_order=True)
mock_mirobo_is_on.assert_has_calls(status_calls, any_order=True)
mock_mirobo_is_on.reset_mock()
async def test_xiaomi_vacuum_fanspeeds(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture, mock_mirobo_fanspeeds
) -> None:
"""Test Xiaomi vacuum fanspeeds."""
entity_name = "test_vacuum_cleaner_2"
entity_id = await setup_component(hass, entity_name)
[MASK] = hass. [MASK] s.get(entity_id)
assert [MASK] .attributes.get(ATTR_FAN_SPEED) == "Silent"
fanspeeds = [MASK] .attributes.get(ATTR_FAN_SPEED_LIST)
for speed in ("Silent", "Standard", "Medium", "Turbo"):
assert speed in fanspeeds
# Set speed service:
await hass.services.async_call(
VACUUM_DOMAIN,
SERVICE_SET_FAN_SPEED,
{"entity_id": entity_id, "fan_speed": 60},
blocking=True,
)
mock_mirobo_fanspeeds.assert_has_calls(
[mock.call.set_fan_speed(60)], any_order=True
)
mock_mirobo_fanspeeds.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_fanspeeds.reset_mock()
fan_speed_dict = mock_mirobo_fanspeeds.fan_speed_presets()
await hass.services.async_call(
VACUUM_DOMAIN,
SERVICE_SET_FAN_SPEED,
{"entity_id": entity_id, "fan_speed": "Medium"},
blocking=True,
)
mock_mirobo_fanspeeds.assert_has_calls(
[mock.call.set_fan_speed(fan_speed_dict["Medium"])], any_order=True
)
mock_mirobo_fanspeeds.assert_has_calls(STATUS_CALLS, any_order=True)
mock_mirobo_fanspeeds.reset_mock()
assert "ERROR" not in caplog.text
await hass.services.async_call(
VACUUM_DOMAIN,
SERVICE_SET_FAN_SPEED,
{"entity_id": entity_id, "fan_speed": "invent"},
blocking=True,
)
assert "Fan speed step not recognized" in caplog.text
async def setup_component(hass: HomeAssistant, entity_name: str) -> str:
"""Set up vacuum component."""
entity_id = f"{VACUUM_DOMAIN}.{entity_name}"
config_entry = MockConfigEntry(
domain=XIAOMI_DOMAIN,
unique_id="123456",
title=entity_name,
data={
CONF_FLOW_TYPE: CONF_DEVICE,
CONF_HOST: "192.168.1.100",
CONF_TOKEN: "12345678901234567890123456789012",
CONF_MODEL: MODELS_VACUUM[0],
CONF_MAC: TEST_MAC,
},
)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return entity_id
| state |
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import ast
import json
import platform
import zipfile
from collections import OrderedDict, namedtuple
from pathlib import Path
import cv2
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from ultralytics.utils import ARM64, IS_JETSON, IS_RASPBERRYPI, LINUX, LOGGER, PYTHON_VERSION, ROOT, yaml_load
from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml, is_rockchip
from ultralytics.utils.downloads import attempt_download_asset, is_url
def check_class_names(names):
"""
Check class names.
Map imagenet class codes to human-readable names if required. Convert lists to dicts.
"""
if isinstance(names, list): # names is a list
names = dict(enumerate(names)) # convert to dict
if isinstance(names, dict):
# Convert 1) string keys to int, i.e. '0' to 0, and non-string values to strings, i.e. True to 'True'
names = {int(k): str(v) for k, v in names.items()}
n = len(names)
if max(names.keys()) >= n:
raise KeyError(
f"{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices "
f"{min(names.keys())}-{max(names.keys())} defined in your dataset YAML."
)
if isinstance(names[0], str) and names[0].startswith("n0"): # imagenet class codes, i.e. 'n01440764'
names_map = yaml_load(ROOT / "cfg/datasets/ImageNet.yaml")["map"] # human-readable names
names = {k: names_map[v] for k, v in names.items()}
return names
def default_class_names(data=None):
"""Applies default class names to an input YAML file or returns numerical class names."""
if data:
try:
return yaml_load(check_yaml(data))["names"]
except Exception:
pass
return {i: f"class{i}" for i in range(999)} # return default if above errors
class AutoBackend(nn.Module):
"""
Handles dynamic backend selection for running inference using Ultralytics YOLO models.
The AutoBackend class is designed to provide an abstraction layer for various inference engines. It supports a wide
range of formats, each with specific naming conventions as outlined below:
Supported Formats and Naming Conventions:
| Format | File Suffix |
| --------------------- | ----------------- |
| PyTorch | *.pt |
| TorchScript | *.torchscript |
| ONNX Runtime | *.onnx |
| ONNX OpenCV DNN | *.onnx (dnn=True) |
| OpenVINO | *openvino_model/ |
| CoreML | *.mlpackage |
| TensorRT | *.engine |
| TensorFlow SavedModel | *_saved_model/ |
| TensorFlow GraphDef | *.pb |
| TensorFlow Lite | *.tflite |
| TensorFlow Edge TPU | *_edgetpu.tflite |
| PaddlePaddle | *_paddle_model/ |
| MNN | *.mnn |
| NCNN | *_ncnn_model/ |
| IMX | *_imx_model/ |
| RKNN | *_rknn_model/ |
This class offers dynamic backend switching capabilities based on the input model format, making it easier to deploy
models across various platforms.
"""
@torch.no_grad()
def __init__(
self,
weights="yolo11n.pt",
device=torch.device("cpu"),
dnn=False,
data=None,
fp16=False,
batch=1,
fuse=True,
verbose=True,
):
"""
Initialize the AutoBackend for inference.
Args:
weights (str | torch.nn.Module): Path to the model weights file or a module instance. Defaults to 'yolo11n.pt'.
device (torch.device): Device to run the model on. Defaults to CPU.
dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional.
fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
batch (int): Batch-size to assume for inference.
fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
verbose (bool): Enable verbose logging. Defaults to True.
"""
super().__init__()
w = str(weights[0] if isinstance(weights, list) else weights)
nn_module = isinstance(weights, torch.nn.Module)
(
pt,
jit,
onnx,
xml,
engine,
coreml,
saved_model,
pb,
tflite,
edgetpu,
tfjs,
paddle,
mnn,
ncnn,
imx,
rknn,
triton,
) = self._model_type(w)
fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16
nhwc = coreml or saved_model or pb or tflite or edgetpu or rknn # BHWC formats (vs torch BCWH)
stride = 32 # default stride
end2end = False # default end2end
model, metadata, task = None, None, None
# Set device
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA
if cuda and not any([nn_module, pt, jit, engine, onnx, paddle]): # GPU dataloader formats
device = torch.device("cpu")
cuda = False
# Download if not local
if not (pt or triton or nn_module):
w = attempt_download_asset(w)
# In-memory PyTorch model
if nn_module:
model = weights.to(device)
if fuse:
model = model.fuse(verbose=verbose)
if [MASK] (model, "kpt_shape"):
kpt_shape = model.kpt_shape # pose-only
stride = max(int(model.stride.max()), 32) # model stride
names = model.module.names if [MASK] (model, "module") else model.names # get class names
model.half() if fp16 else model.float()
self.model = model # explicitly assign for to(), cpu(), cuda(), half()
pt = True
# PyTorch
elif pt:
from ultralytics.nn.tasks import attempt_load_weights
model = attempt_load_weights(
weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse
)
if [MASK] (model, "kpt_shape"):
kpt_shape = model.kpt_shape # pose-only
stride = max(int(model.stride.max()), 32) # model stride
names = model.module.names if [MASK] (model, "module") else model.names # get class names
model.half() if fp16 else model.float()
self.model = model # explicitly assign for to(), cpu(), cuda(), half()
# TorchScript
elif jit:
LOGGER.info(f"Loading {w} for TorchScript inference...")
extra_files = {"config.txt": ""} # model metadata
model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
model.half() if fp16 else model.float()
if extra_files["config.txt"]: # load metadata dict
metadata = json.loads(extra_files["config.txt"], object_hook=lambda x: dict(x.items()))
# ONNX OpenCV DNN
elif dnn:
LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...")
check_requirements("opencv-python>=4.5.4")
net = cv2.dnn.readNetFromONNX(w)
# ONNX Runtime and IMX
elif onnx or imx:
LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
if IS_RASPBERRYPI or IS_JETSON:
# Fix 'numpy.linalg._umath_linalg' has no attribute '_ilp64' for TF SavedModel on RPi and Jetson
check_requirements("numpy==1.23.5")
import onnxruntime
providers = ["CPUExecutionProvider"]
if cuda and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
providers.insert(0, "CUDAExecutionProvider")
elif cuda: # Only log warning if CUDA was requested but unavailable
LOGGER.warning("WARNING ⚠️ Failed to start ONNX Runtime with CUDA. Using CPU...")
device = torch.device("cpu")
cuda = False
LOGGER.info(f"Using ONNX Runtime {providers[0]}")
if onnx:
session = onnxruntime.InferenceSession(w, providers=providers)
else:
check_requirements(
["model-compression-toolkit==2.1.1", "sony-custom-layers[torch]==0.2.0", "onnxruntime-extensions"]
)
w = next(Path(w).glob("*.onnx"))
LOGGER.info(f"Loading {w} for ONNX IMX inference...")
import mct_quantizers as mctq
from sony_custom_layers.pytorch.object_detection import nms_ort # noqa
session = onnxruntime.InferenceSession(
w, mctq.get_ort_session_options(), providers=["CPUExecutionProvider"]
)
task = "detect"
output_names = [x.name for x in session.get_outputs()]
metadata = session.get_modelmeta().custom_metadata_map
dynamic = isinstance(session.get_outputs()[0].shape[0], str)
fp16 = True if "float16" in session.get_inputs()[0].type else False
if not dynamic:
io = session.io_binding()
bindings = []
for output in session.get_outputs():
out_fp16 = "float16" in output.type
y_tensor = torch.empty(output.shape, dtype=torch.float16 if out_fp16 else torch.float32).to(device)
io.bind_output(
name=output.name,
device_type=device.type,
device_id=device.index if cuda else 0,
element_type=np.float16 if out_fp16 else np.float32,
shape=tuple(y_tensor.shape),
buffer_ptr=y_tensor.data_ptr(),
)
bindings.append(y_tensor)
# OpenVINO
elif xml:
LOGGER.info(f"Loading {w} for OpenVINO inference...")
check_requirements("openvino>=2024.0.0")
import openvino as ov
core = ov.Core()
w = Path(w)
if not w.is_file(): # if not *.xml
w = next(w.glob("*.xml")) # get *.xml file from *_openvino_model dir
ov_model = core.read_model(model=str(w), weights=w.with_suffix(".bin"))
if ov_model.get_parameters()[0].get_layout().empty:
ov_model.get_parameters()[0].set_layout(ov.Layout("NCHW"))
# OpenVINO inference modes are 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT'
inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 else "LATENCY"
LOGGER.info(f"Using OpenVINO {inference_mode} mode for batch={batch} inference...")
ov_compiled_model = core.compile_model(
ov_model,
device_name="AUTO", # AUTO selects best available device, do not modify
config={"PERFORMANCE_HINT": inference_mode},
)
input_name = ov_compiled_model.input().get_any_name()
metadata = w.parent / "metadata.yaml"
# TensorRT
elif engine:
LOGGER.info(f"Loading {w} for TensorRT inference...")
if IS_JETSON and PYTHON_VERSION <= "3.8.0":
# fix error: `np.bool` was a deprecated alias for the builtin `bool` for JetPack 4 with Python <= 3.8.0
check_requirements("numpy==1.23.5")
try:
import tensorrt as trt # noqa https://developer.nvidia.com/nvidia-tensorrt-download
except ImportError:
if LINUX:
check_requirements("tensorrt>7.0.0,!=10.1.0")
import tensorrt as trt # noqa
check_version(trt.__version__, ">=7.0.0", hard=True)
check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
if device.type == "cpu":
device = torch.device("cuda:0")
Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr"))
logger = trt.Logger(trt.Logger.INFO)
# Read file
with open(w, "rb") as f, trt.Runtime(logger) as runtime:
try:
meta_len = int.from_bytes(f.read(4), byteorder="little") # read metadata length
metadata = json.loads(f.read(meta_len).decode("utf-8")) # read metadata
except UnicodeDecodeError:
f.seek(0) # engine file may lack embedded Ultralytics metadata
dla = metadata.get("dla", None)
if dla is not None:
runtime.DLA_core = int(dla)
model = runtime.deserialize_cuda_engine(f.read()) # read engine
# Model context
try:
context = model.create_execution_context()
except Exception as e: # model is None
LOGGER.error(f"ERROR: TensorRT model exported with a different version than {trt.__version__}\n")
raise e
bindings = OrderedDict()
output_names = []
fp16 = False # default updated below
dynamic = False
is_trt10 = not [MASK] (model, "num_bindings")
num = range(model.num_io_tensors) if is_trt10 else range(model.num_bindings)
for i in num:
if is_trt10:
name = model.get_tensor_name(i)
dtype = trt.nptype(model.get_tensor_dtype(name))
is_input = model.get_tensor_mode(name) == trt.TensorIOMode.INPUT
if is_input:
if -1 in tuple(model.get_tensor_shape(name)):
dynamic = True
context.set_input_shape(name, tuple(model.get_tensor_profile_shape(name, 0)[1]))
if dtype == np.float16:
fp16 = True
else:
output_names.append(name)
shape = tuple(context.get_tensor_shape(name))
else: # TensorRT < 10.0
name = model.get_binding_name(i)
dtype = trt.nptype(model.get_binding_dtype(i))
is_input = model.binding_is_input(i)
if model.binding_is_input(i):
if -1 in tuple(model.get_binding_shape(i)): # dynamic
dynamic = True
context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[1]))
if dtype == np.float16:
fp16 = True
else:
output_names.append(name)
shape = tuple(context.get_binding_shape(i))
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
batch_size = bindings["images"].shape[0] # if dynamic, this is instead max batch size
# CoreML
elif coreml:
LOGGER.info(f"Loading {w} for CoreML inference...")
import coremltools as ct
model = ct.models.MLModel(w)
metadata = dict(model.user_defined_metadata)
# TF SavedModel
elif saved_model:
LOGGER.info(f"Loading {w} for TensorFlow SavedModel inference...")
import tensorflow as tf
keras = False # assume TF1 saved_model
model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
metadata = Path(w) / "metadata.yaml"
# TF GraphDef
elif pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...")
import tensorflow as tf
from ultralytics.engine.exporter import gd_outputs
def wrap_frozen_graph(gd, inputs, outputs):
"""Wrap frozen graphs for deployment."""
x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped
ge = x.graph.as_graph_element
return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
gd = tf.Graph().as_graph_def() # TF GraphDef
with open(w, "rb") as f:
gd.ParseFromString(f.read())
frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
try: # find metadata in SavedModel alongside GraphDef
metadata = next(Path(w).resolve().parent.rglob(f"{Path(w).stem}_saved_model*/metadata.yaml"))
except StopIteration:
pass
# TFLite or TFLite Edge TPU
elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
from tflite_runtime.interpreter import Interpreter, load_delegate
except ImportError:
import tensorflow as tf
Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
device = device[3:] if str(device).startswith("tpu") else ":0"
LOGGER.info(f"Loading {w} on device {device[1:]} for TensorFlow Lite Edge TPU inference...")
delegate = {"Linux": "libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[
platform.system()
]
interpreter = Interpreter(
model_path=w,
experimental_delegates=[load_delegate(delegate, options={"device": device})],
)
device = "cpu" # Required, otherwise PyTorch will try to use the wrong device
else: # TFLite
LOGGER.info(f"Loading {w} for TensorFlow Lite inference...")
interpreter = Interpreter(model_path=w) # load TFLite model
interpreter.allocate_tensors() # allocate
input_details = interpreter.get_input_details() # inputs
output_details = interpreter.get_output_details() # outputs
# Load metadata
try:
with zipfile.ZipFile(w, "r") as model:
meta_file = model.namelist()[0]
metadata = ast.literal_eval(model.read(meta_file).decode("utf-8"))
except zipfile.BadZipFile:
pass
# TF.js
elif tfjs:
raise NotImplementedError("YOLOv8 TF.js inference is not currently supported.")
# PaddlePaddle
elif paddle:
LOGGER.info(f"Loading {w} for PaddlePaddle inference...")
check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle")
import paddle.inference as pdi # noqa
w = Path(w)
if not w.is_file(): # if not *.pdmodel
w = next(w.rglob("*.pdmodel")) # get *.pdmodel file from *_paddle_model dir
config = pdi.Config(str(w), str(w.with_suffix(".pdiparams")))
if cuda:
config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
predictor = pdi.create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
output_names = predictor.get_output_names()
metadata = w.parents[1] / "metadata.yaml"
# MNN
elif mnn:
LOGGER.info(f"Loading {w} for MNN inference...")
check_requirements("MNN") # requires MNN
import os
import MNN
config = {"precision": "low", "backend": "CPU", "numThread": (os.cpu_count() + 1) // 2}
rt = MNN.nn.create_runtime_manager((config,))
net = MNN.nn.load_module_from_file(w, [], [], runtime_manager=rt, rearrange=True)
def torch_to_mnn(x):
return MNN.expr.const(x.data_ptr(), x.shape)
metadata = json.loads(net.get_info()["bizCode"])
# NCNN
elif ncnn:
LOGGER.info(f"Loading {w} for NCNN inference...")
check_requirements("git+https://github.com/Tencent/ncnn.git" if ARM64 else "ncnn") # requires NCNN
import ncnn as pyncnn
net = pyncnn.Net()
net.opt.use_vulkan_compute = cuda
w = Path(w)
if not w.is_file(): # if not *.param
w = next(w.glob("*.param")) # get *.param file from *_ncnn_model dir
net.load_param(str(w))
net.load_model(str(w.with_suffix(".bin")))
metadata = w.parent / "metadata.yaml"
# NVIDIA Triton Inference Server
elif triton:
check_requirements("tritonclient[all]")
from ultralytics.utils.triton import TritonRemoteModel
model = TritonRemoteModel(w)
metadata = model.metadata
# RKNN
elif rknn:
if not is_rockchip():
raise OSError("RKNN inference is only supported on Rockchip devices.")
LOGGER.info(f"Loading {w} for RKNN inference...")
check_requirements("rknn-toolkit-lite2")
from rknnlite.api import RKNNLite
w = Path(w)
if not w.is_file(): # if not *.rknn
w = next(w.rglob("*.rknn")) # get *.rknn file from *_rknn_model dir
rknn_model = RKNNLite()
rknn_model.load_rknn(w)
rknn_model.init_runtime()
metadata = Path(w).parent / "metadata.yaml"
# Any other format (unsupported)
else:
from ultralytics.engine.exporter import export_formats
raise TypeError(
f"model='{w}' is not a supported model format. Ultralytics supports: {export_formats()['Format']}\n"
f"See https://docs.ultralytics.com/modes/predict for help."
)
# Load external metadata YAML
if isinstance(metadata, (str, Path)) and Path(metadata).exists():
metadata = yaml_load(metadata)
if metadata and isinstance(metadata, dict):
for k, v in metadata.items():
if k in {"stride", "batch"}:
metadata[k] = int(v)
elif k in {"imgsz", "names", "kpt_shape", "args"} and isinstance(v, str):
metadata[k] = eval(v)
stride = metadata["stride"]
task = metadata["task"]
batch = metadata["batch"]
imgsz = metadata["imgsz"]
names = metadata["names"]
kpt_shape = metadata.get("kpt_shape")
end2end = metadata.get("args", {}).get("nms", False)
elif not (pt or triton or nn_module):
LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'")
# Check names
if "names" not in locals(): # names missing
names = default_class_names(data)
names = check_class_names(names)
# Disable gradients
if pt:
for p in model.parameters():
p.requires_grad = False
self.__dict__.update(locals()) # assign all variables to self
def forward(self, im, augment=False, visualize=False, embed=None):
"""
Runs inference on the YOLOv8 MultiBackend model.
Args:
im (torch.Tensor): The image tensor to perform inference on.
augment (bool): whether to perform data augmentation during inference, defaults to False
visualize (bool): whether to visualize the output predictions, defaults to False
embed (list, optional): A list of feature vectors/embeddings to return.
Returns:
(tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True)
"""
b, ch, h, w = im.shape # batch, channel, height, width
if self.fp16 and im.dtype != torch.float16:
im = im.half() # to FP16
if self.nhwc:
im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
# PyTorch
if self.pt or self.nn_module:
y = self.model(im, augment=augment, visualize=visualize, embed=embed)
# TorchScript
elif self.jit:
y = self.model(im)
# ONNX OpenCV DNN
elif self.dnn:
im = im.cpu().numpy() # torch to numpy
self.net.setInput(im)
y = self.net.forward()
# ONNX Runtime
elif self.onnx or self.imx:
if self.dynamic:
im = im.cpu().numpy() # torch to numpy
y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
else:
if not self.cuda:
im = im.cpu()
self.io.bind_input(
name="images",
device_type=im.device.type,
device_id=im.device.index if im.device.type == "cuda" else 0,
element_type=np.float16 if self.fp16 else np.float32,
shape=tuple(im.shape),
buffer_ptr=im.data_ptr(),
)
self.session.run_with_iobinding(self.io)
y = self.bindings
if self.imx:
# boxes, conf, cls
y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
# OpenVINO
elif self.xml:
im = im.cpu().numpy() # FP32
if self.inference_mode in {"THROUGHPUT", "CUMULATIVE_THROUGHPUT"}: # optimized for larger batch-sizes
n = im.shape[0] # number of images in batch
results = [None] * n # preallocate list with None to match the number of images
def callback(request, userdata):
"""Places result in preallocated list using userdata index."""
results[userdata] = request.results
# Create AsyncInferQueue, set the callback and start asynchronous inference for each input image
async_queue = self.ov.runtime.AsyncInferQueue(self.ov_compiled_model)
async_queue.set_callback(callback)
for i in range(n):
# Start async inference with userdata=i to specify the position in results list
async_queue.start_async(inputs={self.input_name: im[i : i + 1]}, userdata=i) # keep image as BCHW
async_queue.wait_all() # wait for all inference requests to complete
y = np.concatenate([list(r.values())[0] for r in results])
else: # inference_mode = "LATENCY", optimized for fastest first result at batch-size 1
y = list(self.ov_compiled_model(im).values())
# TensorRT
elif self.engine:
if self.dynamic and im.shape != self.bindings["images"].shape:
if self.is_trt10:
self.context.set_input_shape("images", im.shape)
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
for name in self.output_names:
self.bindings[name].data.resize_(tuple(self.context.get_tensor_shape(name)))
else:
i = self.model.get_binding_index("images")
self.context.set_binding_shape(i, im.shape)
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
for name in self.output_names:
i = self.model.get_binding_index(name)
self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
s = self.bindings["images"].shape
assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
self.binding_addrs["images"] = int(im.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
y = [self.bindings[x].data for x in sorted(self.output_names)]
# CoreML
elif self.coreml:
im = im[0].cpu().numpy()
im_pil = Image.fromarray((im * 255).astype("uint8"))
# im = im.resize((192, 320), Image.BILINEAR)
y = self.model.predict({"image": im_pil}) # coordinates are xywh normalized
if "confidence" in y:
raise TypeError(
"Ultralytics only supports inference of non-pipelined CoreML models exported with "
f"'nms=False', but 'model={w}' has an NMS pipeline created by an 'nms=True' export."
)
# TODO: CoreML NMS inference handling
# from ultralytics.utils.ops import xywh2xyxy
# box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
# conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32)
# y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
y = list(y.values())
if len(y) == 2 and len(y[1].shape) != 4: # segmentation model
y = list(reversed(y)) # reversed for segmentation models (pred, proto)
# PaddlePaddle
elif self.paddle:
im = im.cpu().numpy().astype(np.float32)
self.input_handle.copy_from_cpu(im)
self.predictor.run()
y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
# MNN
elif self.mnn:
input_var = self.torch_to_mnn(im)
output_var = self.net.onForward([input_var])
y = [x.read() for x in output_var]
# NCNN
elif self.ncnn:
mat_in = self.pyncnn.Mat(im[0].cpu().numpy())
with self.net.create_extractor() as ex:
ex.input(self.net.input_names()[0], mat_in)
# WARNING: 'output_names' sorted as a temporary fix for https://github.com/pnnx/pnnx/issues/130
y = [np.array(ex.extract(x)[1])[None] for x in sorted(self.net.output_names())]
# NVIDIA Triton Inference Server
elif self.triton:
im = im.cpu().numpy() # torch to numpy
y = self.model(im)
# RKNN
elif self.rknn:
im = (im.cpu().numpy() * 255).astype("uint8")
im = im if isinstance(im, (list, tuple)) else [im]
y = self.rknn_model.inference(inputs=im)
# TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
else:
im = im.cpu().numpy()
if self.saved_model: # SavedModel
y = self.model(im, training=False) if self.keras else self.model(im)
if not isinstance(y, list):
y = [y]
elif self.pb: # GraphDef
y = self.frozen_func(x=self.tf.constant(im))
else: # Lite or Edge TPU
details = self.input_details[0]
is_int = details["dtype"] in {np.int8, np.int16} # is TFLite quantized int8 or int16 model
if is_int:
scale, zero_point = details["quantization"]
im = (im / scale + zero_point).astype(details["dtype"]) # de-scale
self.interpreter.set_tensor(details["index"], im)
self.interpreter.invoke()
y = []
for output in self.output_details:
x = self.interpreter.get_tensor(output["index"])
if is_int:
scale, zero_point = output["quantization"]
x = (x.astype(np.float32) - zero_point) * scale # re-scale
if x.ndim == 3: # if task is not classification, excluding masks (ndim=4) as well
# Denormalize xywh by image size. See https://github.com/ultralytics/ultralytics/pull/1695
# xywh are normalized in TFLite/EdgeTPU to mitigate quantization error of integer models
if x.shape[-1] == 6 or self.end2end: # end-to-end model
x[:, :, [0, 2]] *= w
x[:, :, [1, 3]] *= h
if self.task == "pose":
x[:, :, 6::3] *= w
x[:, :, 7::3] *= h
else:
x[:, [0, 2]] *= w
x[:, [1, 3]] *= h
if self.task == "pose":
x[:, 5::3] *= w
x[:, 6::3] *= h
y.append(x)
# TF segment fixes: export is reversed vs ONNX export and protos are transposed
if len(y) == 2: # segment with (det, proto) output order reversed
if len(y[1].shape) != 4:
y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32)
if y[1].shape[-1] == 6: # end-to-end model
y = [y[1]]
else:
y[1] = np.transpose(y[1], (0, 3, 1, 2)) # should be y = (1, 116, 8400), (1, 32, 160, 160)
y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
# for x in y:
# print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape) # debug shapes
if isinstance(y, (list, tuple)):
if len(self.names) == 999 and (self.task == "segment" or len(y) == 2): # segments and names not defined
nc = y[0].shape[1] - y[1].shape[1] - 4 # y = (1, 32, 160, 160), (1, 116, 8400)
self.names = {i: f"class{i}" for i in range(nc)}
return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
else:
return self.from_numpy(y)
def from_numpy(self, x):
"""
Convert a numpy array to a tensor.
Args:
x (np.ndarray): The array to be converted.
Returns:
(torch.Tensor): The converted tensor
"""
return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz=(1, 3, 640, 640)):
"""
Warm up the model by running one forward pass with a dummy input.
Args:
imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width)
"""
import torchvision # noqa (import here so torchvision import time not recorded in postprocess time)
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module
if any(warmup_types) and (self.device.type != "cpu" or self.triton):
im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
for _ in range(2 if self.jit else 1):
self.forward(im) # warmup
@staticmethod
def _model_type(p="path/to/model.pt"):
"""
Takes a path to a model file and returns the model type. Possibles types are pt, jit, onnx, xml, engine, coreml,
saved_model, pb, tflite, edgetpu, tfjs, ncnn or paddle.
Args:
p (str): path to the model file. Defaults to path/to/model.pt
Examples:
>>> model = AutoBackend(weights="path/to/model.onnx")
>>> model_type = model._model_type() # returns "onnx"
"""
from ultralytics.engine.exporter import export_formats
sf = export_formats()["Suffix"] # export suffixes
if not is_url(p) and not isinstance(p, str):
check_suffix(p, sf) # checks
name = Path(p).name
types = [s in name for s in sf]
types[5] |= name.endswith(".mlmodel") # retain support for older Apple CoreML *.mlmodel formats
types[8] &= not types[9] # tflite &= not edgetpu
if any(types):
triton = False
else:
from urllib.parse import urlsplit
url = urlsplit(p)
triton = bool(url.netloc) and bool(url.path) and url.scheme in {"http", "grpc"}
return types + [triton]
| hasattr |
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for image detection export lib."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf, tf_keras
from official.core import exp_factory
from official.projects.deepmac_maskrcnn.serving import detection
class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_detection_module(self, experiment_name, image_size=(640, 640)):
params = exp_factory.get_exp_config(experiment_name)
params.task.model.backbone.resnet.model_id = 18
params.task.model.detection_generator.use_batched_nms = True
detection_module = detection.DetectionModule(
params, batch_size=1, input_image_size=list(image_size))
return detection_module
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module, save_directory, signatures=signatures)
def _get_dummy_input(self, input_type, batch_size, image_size):
"""Get dummy input for the given input type."""
h, w = image_size
if input_type == 'image_tensor':
return tf.zeros((batch_size, h, w, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue() for b in range(batch_size)]
elif input_type == 'tf_example':
image_tensor = tf.zeros((h, w, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example for b in range(batch_size)]
@parameterized.parameters(
('image_tensor', 'deep_mask_head_rcnn_resnetfpn_coco', [640, 640]),
('image_bytes', 'deep_mask_head_rcnn_resnetfpn_coco', [640, 384]),
('tf_example', 'deep_mask_head_rcnn_resnetfpn_coco', [640, 640]),
)
def test_export(self, input_type, experiment_name, image_size):
self.skipTest('a')
tmp_dir = self.get_temp_dir()
module = self._get_detection_module(experiment_name, image_size)
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
detection_fn = imported.signatures['serving_default']
images = self._get_dummy_input(
input_type, batch_size=1, image_size=image_size)
processed_images, anchor_boxes, image_info = module._build_inputs(
tf.zeros((224, 224, 3), dtype=tf.uint8))
image_shape = image_info[1, :]
image_shape = tf.expand_dims(image_shape, 0)
processed_images = tf.expand_dims(processed_images, 0)
for l, l_boxes in anchor_boxes.items():
anchor_boxes[l] = tf.expand_dims(l_boxes, 0)
expected_ [MASK] = module.model(
images=processed_images,
image_shape=image_shape,
anchor_boxes=anchor_boxes,
training=False)
[MASK] = detection_fn(tf.constant(images))
self.assertAllClose( [MASK] ['num_detections'].numpy(),
expected_ [MASK] ['num_detections'].numpy())
@parameterized.parameters(
('deep_mask_head_rcnn_resnetfpn_coco', [640, 640], 1),
('deep_mask_head_rcnn_resnetfpn_coco', [640, 640], 5),
('deep_mask_head_rcnn_spinenet_coco', [640, 384], 3),
('deep_mask_head_rcnn_spinenet_coco', [640, 384], 9),
)
def test_export_image_and_boxes(self, experiment_name, image_size, num_boxes):
tmp_dir = self.get_temp_dir()
module = self._get_detection_module(experiment_name)
self._export_from_module(module, 'image_and_boxes_tensor', tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
detection_fn = imported.signatures['serving_default']
images = self._get_dummy_input(
'image_tensor', batch_size=1, image_size=image_size)
processed_images, anchor_boxes, image_info = module._build_inputs(
tf.zeros(image_size + [3], dtype=tf.uint8))
image_shape = image_info[1, :]
image_shape = image_shape[tf.newaxis]
processed_images = processed_images[tf.newaxis]
image_info = image_info[tf.newaxis]
for l, l_boxes in anchor_boxes.items():
anchor_boxes[l] = tf.expand_dims(l_boxes, 0)
boxes = np.zeros((1, num_boxes, 4), dtype=np.float32)
boxes[:, :, [2, 3]] = 1.0
boxes = tf.constant(boxes)
denormalized_boxes = detection.reverse_input_box_transformation(
boxes, image_info)
expected_ [MASK] = module.model.call_images_and_boxes(
images=processed_images, boxes=denormalized_boxes)
[MASK] = detection_fn(images=tf.constant(images), boxes=boxes)
self.assertAllClose( [MASK] ['detection_masks'].numpy(),
expected_ [MASK] ['detection_masks'].numpy(),
rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
tf.test.main()
| outputs |
"# pyright: reportPropertyTypeMismatch=false\nfrom __future__ import annotations\n\nimport collectio(...TRUNCATED) | getattr |
"# pyright: reportPropertyTypeMismatch=false\nfrom __future__ import annotations\n\nimport collectio(...TRUNCATED) | other |
"import asyncio\nimport os\nimport re\nfrom datetime import datetime\nfrom typing import Union\nfrom(...TRUNCATED) | chunk |
"# pyright: reportPropertyTypeMismatch=false\nfrom __future__ import annotations\n\nimport collectio(...TRUNCATED) | ax_names |
"# coding=utf-8\n# Copyright 2021-2023 HuggingFace Inc.\n#\n# Licensed under the Apache License, Ver(...TRUNCATED) | padding |
End of preview.
No dataset card yet
- Downloads last month
- 0