Spaces:
Running
Running
# Ultralytics ๐ AGPL-3.0 License - https://ultralytics.com/license | |
import gc | |
import math | |
import os | |
import random | |
import time | |
from contextlib import contextmanager | |
from copy import deepcopy | |
from datetime import datetime | |
from pathlib import Path | |
from typing import Union | |
import numpy as np | |
import thop | |
import torch | |
import torch.distributed as dist | |
import torch.nn as nn | |
import torch.nn.functional as F | |
from ultralytics.utils import ( | |
DEFAULT_CFG_DICT, | |
DEFAULT_CFG_KEYS, | |
LOGGER, | |
NUM_THREADS, | |
PYTHON_VERSION, | |
TORCHVISION_VERSION, | |
WINDOWS, | |
__version__, | |
colorstr, | |
) | |
from ultralytics.utils.checks import check_version | |
# Version checks (all default to version>=min_version) | |
TORCH_1_9 = check_version(torch.__version__, "1.9.0") | |
TORCH_1_13 = check_version(torch.__version__, "1.13.0") | |
TORCH_2_0 = check_version(torch.__version__, "2.0.0") | |
TORCH_2_4 = check_version(torch.__version__, "2.4.0") | |
TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0") | |
TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0") | |
TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0") | |
TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0") | |
if WINDOWS and check_version(torch.__version__, "==2.4.0"): # reject version 2.4.0 on Windows | |
LOGGER.warning( | |
"WARNING โ ๏ธ Known issue with torch==2.4.0 on Windows with CPU, recommend upgrading to torch>=2.4.1 to resolve " | |
"https://github.com/ultralytics/ultralytics/issues/15049" | |
) | |
def torch_distributed_zero_first(local_rank: int): | |
"""Ensures all processes in distributed training wait for the local master (rank 0) to complete a task first.""" | |
initialized = dist.is_available() and dist.is_initialized() | |
if initialized and local_rank not in {-1, 0}: | |
dist.barrier(device_ids=[local_rank]) | |
yield | |
if initialized and local_rank == 0: | |
dist.barrier(device_ids=[local_rank]) | |
def smart_inference_mode(): | |
"""Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator.""" | |
def decorate(fn): | |
"""Applies appropriate torch decorator for inference mode based on torch version.""" | |
if TORCH_1_9 and torch.is_inference_mode_enabled(): | |
return fn # already in inference_mode, act as a pass-through | |
else: | |
return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn) | |
return decorate | |
def autocast(enabled: bool, device: str = "cuda"): | |
""" | |
Get the appropriate autocast context manager based on PyTorch version and AMP setting. | |
This function returns a context manager for automatic mixed precision (AMP) training that is compatible with both | |
older and newer versions of PyTorch. It handles the differences in the autocast API between PyTorch versions. | |
Args: | |
enabled (bool): Whether to enable automatic mixed precision. | |
device (str, optional): The device to use for autocast. Defaults to 'cuda'. | |
Returns: | |
(torch.amp.autocast): The appropriate autocast context manager. | |
Note: | |
- For PyTorch versions 1.13 and newer, it uses `torch.amp.autocast`. | |
- For older versions, it uses `torch.cuda.autocast`. | |
Example: | |
```python | |
with autocast(amp=True): | |
# Your mixed precision operations here | |
pass | |
``` | |
""" | |
if TORCH_1_13: | |
return torch.amp.autocast(device, enabled=enabled) | |
else: | |
return torch.cuda.amp.autocast(enabled) | |
def get_cpu_info(): | |
"""Return a string with system CPU information, i.e. 'Apple M2'.""" | |
from ultralytics.utils import PERSISTENT_CACHE # avoid circular import error | |
if "cpu_info" not in PERSISTENT_CACHE: | |
try: | |
import cpuinfo # pip install py-cpuinfo | |
k = "brand_raw", "hardware_raw", "arch_string_raw" # keys sorted by preference | |
info = cpuinfo.get_cpu_info() # info dict | |
string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown") | |
PERSISTENT_CACHE["cpu_info"] = string.replace("(R)", "").replace("CPU ", "").replace("@ ", "") | |
except Exception: | |
pass | |
return PERSISTENT_CACHE.get("cpu_info", "unknown") | |
def get_gpu_info(index): | |
"""Return a string with system GPU information, i.e. 'Tesla T4, 15102MiB'.""" | |
properties = torch.cuda.get_device_properties(index) | |
return f"{properties.name}, {properties.total_memory / (1 << 20):.0f}MiB" | |
def select_device(device="", batch=0, newline=False, verbose=True): | |
""" | |
Selects the appropriate PyTorch device based on the provided arguments. | |
The function takes a string specifying the device or a torch.device object and returns a torch.device object | |
representing the selected device. The function also validates the number of available devices and raises an | |
exception if the requested device(s) are not available. | |
Args: | |
device (str | torch.device, optional): Device string or torch.device object. | |
Options are 'None', 'cpu', or 'cuda', or '0' or '0,1,2,3'. Defaults to an empty string, which auto-selects | |
the first available GPU, or CPU if no GPU is available. | |
batch (int, optional): Batch size being used in your model. Defaults to 0. | |
newline (bool, optional): If True, adds a newline at the end of the log string. Defaults to False. | |
verbose (bool, optional): If True, logs the device information. Defaults to True. | |
Returns: | |
(torch.device): Selected device. | |
Raises: | |
ValueError: If the specified device is not available or if the batch size is not a multiple of the number of | |
devices when using multiple GPUs. | |
Examples: | |
>>> select_device("cuda:0") | |
device(type='cuda', index=0) | |
>>> select_device("cpu") | |
device(type='cpu') | |
Note: | |
Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use. | |
""" | |
if isinstance(device, torch.device) or str(device).startswith("tpu"): | |
return device | |
s = f"Ultralytics {__version__} ๐ Python-{PYTHON_VERSION} torch-{torch.__version__} " | |
device = str(device).lower() | |
for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ": | |
device = device.replace(remove, "") # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1' | |
cpu = device == "cpu" | |
mps = device in {"mps", "mps:0"} # Apple Metal Performance Shaders (MPS) | |
if cpu or mps: | |
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False | |
elif device: # non-cpu device requested | |
if device == "cuda": | |
device = "0" | |
if "," in device: | |
device = ",".join([x for x in device.split(",") if x]) # remove sequential commas, i.e. "0,,1" -> "0,1" | |
visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) | |
os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() | |
if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.split(","))): | |
LOGGER.info(s) | |
install = ( | |
"See https://pytorch.org/get-started/locally/ for up-to-date torch install instructions if no " | |
"CUDA devices are seen by torch.\n" | |
if torch.cuda.device_count() == 0 | |
else "" | |
) | |
raise ValueError( | |
f"Invalid CUDA 'device={device}' requested." | |
f" Use 'device=cpu' or pass valid CUDA device(s) if available," | |
f" i.e. 'device=0' or 'device=0,1,2,3' for Multi-GPU.\n" | |
f"\ntorch.cuda.is_available(): {torch.cuda.is_available()}" | |
f"\ntorch.cuda.device_count(): {torch.cuda.device_count()}" | |
f"\nos.environ['CUDA_VISIBLE_DEVICES']: {visible}\n" | |
f"{install}" | |
) | |
if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available | |
devices = device.split(",") if device else "0" # i.e. "0,1" -> ["0", "1"] | |
n = len(devices) # device count | |
if n > 1: # multi-GPU | |
if batch < 1: | |
raise ValueError( | |
"AutoBatch with batch<1 not supported for Multi-GPU training, " | |
"please specify a valid batch size, i.e. batch=16." | |
) | |
if batch >= 0 and batch % n != 0: # check batch_size is divisible by device_count | |
raise ValueError( | |
f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or " | |
f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}." | |
) | |
space = " " * (len(s) + 1) | |
for i, d in enumerate(devices): | |
s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n" # bytes to MB | |
arg = "cuda:0" | |
elif mps and TORCH_2_0 and torch.backends.mps.is_available(): | |
# Prefer MPS if available | |
s += f"MPS ({get_cpu_info()})\n" | |
arg = "mps" | |
else: # revert to CPU | |
s += f"CPU ({get_cpu_info()})\n" | |
arg = "cpu" | |
if arg in {"cpu", "mps"}: | |
torch.set_num_threads(NUM_THREADS) # reset OMP_NUM_THREADS for cpu training | |
if verbose: | |
LOGGER.info(s if newline else s.rstrip()) | |
return torch.device(arg) | |
def time_sync(): | |
"""PyTorch-accurate time.""" | |
if torch.cuda.is_available(): | |
torch.cuda.synchronize() | |
return time.time() | |
def fuse_conv_and_bn(conv, bn): | |
"""Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/.""" | |
fusedconv = ( | |
nn.Conv2d( | |
conv.in_channels, | |
conv.out_channels, | |
kernel_size=conv.kernel_size, | |
stride=conv.stride, | |
padding=conv.padding, | |
dilation=conv.dilation, | |
groups=conv.groups, | |
bias=True, | |
) | |
.requires_grad_(False) | |
.to(conv.weight.device) | |
) | |
# Prepare filters | |
w_conv = conv.weight.view(conv.out_channels, -1) | |
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) | |
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) | |
# Prepare spatial bias | |
b_conv = torch.zeros(conv.weight.shape[0], device=conv.weight.device) if conv.bias is None else conv.bias | |
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) | |
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) | |
return fusedconv | |
def fuse_deconv_and_bn(deconv, bn): | |
"""Fuse ConvTranspose2d() and BatchNorm2d() layers.""" | |
fuseddconv = ( | |
nn.ConvTranspose2d( | |
deconv.in_channels, | |
deconv.out_channels, | |
kernel_size=deconv.kernel_size, | |
stride=deconv.stride, | |
padding=deconv.padding, | |
output_padding=deconv.output_padding, | |
dilation=deconv.dilation, | |
groups=deconv.groups, | |
bias=True, | |
) | |
.requires_grad_(False) | |
.to(deconv.weight.device) | |
) | |
# Prepare filters | |
w_deconv = deconv.weight.view(deconv.out_channels, -1) | |
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) | |
fuseddconv.weight.copy_(torch.mm(w_bn, w_deconv).view(fuseddconv.weight.shape)) | |
# Prepare spatial bias | |
b_conv = torch.zeros(deconv.weight.shape[1], device=deconv.weight.device) if deconv.bias is None else deconv.bias | |
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) | |
fuseddconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) | |
return fuseddconv | |
def model_info(model, detailed=False, verbose=True, imgsz=640): | |
"""Print and return detailed model information layer by layer.""" | |
if not verbose: | |
return | |
n_p = get_num_params(model) # number of parameters | |
n_g = get_num_gradients(model) # number of gradients | |
n_l = len(list(model.modules())) # number of layers | |
if detailed: | |
LOGGER.info(f"{'layer':>5}{'name':>40}{'gradient':>10}{'parameters':>12}{'shape':>20}{'mu':>10}{'sigma':>10}") | |
for i, (name, p) in enumerate(model.named_parameters()): | |
name = name.replace("module_list.", "") | |
LOGGER.info( | |
f"{i:>5g}{name:>40s}{p.requires_grad!r:>10}{p.numel():>12g}{str(list(p.shape)):>20s}" | |
f"{p.mean():>10.3g}{p.std():>10.3g}{str(p.dtype):>15s}" | |
) | |
flops = get_flops(model, imgsz) # imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320] | |
fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else "" | |
fs = f", {flops:.1f} GFLOPs" if flops else "" | |
yaml_file = getattr(model, "yaml_file", "") or getattr(model, "yaml", {}).get("yaml_file", "") | |
model_name = Path(yaml_file).stem.replace("yolo", "YOLO") or "Model" | |
LOGGER.info(f"{model_name} summary{fused}: {n_l:,} layers, {n_p:,} parameters, {n_g:,} gradients{fs}") | |
return n_l, n_p, n_g, flops | |
def get_num_params(model): | |
"""Return the total number of parameters in a YOLO model.""" | |
return sum(x.numel() for x in model.parameters()) | |
def get_num_gradients(model): | |
"""Return the total number of parameters with gradients in a YOLO model.""" | |
return sum(x.numel() for x in model.parameters() if x.requires_grad) | |
def model_info_for_loggers(trainer): | |
""" | |
Return model info dict with useful model information. | |
Example: | |
YOLOv8n info for loggers | |
```python | |
results = { | |
"model/parameters": 3151904, | |
"model/GFLOPs": 8.746, | |
"model/speed_ONNX(ms)": 41.244, | |
"model/speed_TensorRT(ms)": 3.211, | |
"model/speed_PyTorch(ms)": 18.755, | |
} | |
``` | |
""" | |
if trainer.args.profile: # profile ONNX and TensorRT times | |
from ultralytics.utils.benchmarks import ProfileModels | |
results = ProfileModels([trainer.last], device=trainer.device).profile()[0] | |
results.pop("model/name") | |
else: # only return PyTorch times from most recent validation | |
results = { | |
"model/parameters": get_num_params(trainer.model), | |
"model/GFLOPs": round(get_flops(trainer.model), 3), | |
} | |
results["model/speed_PyTorch(ms)"] = round(trainer.validator.speed["inference"], 3) | |
return results | |
def get_flops(model, imgsz=640): | |
"""Return a YOLO model's FLOPs.""" | |
try: | |
model = de_parallel(model) | |
p = next(model.parameters()) | |
if not isinstance(imgsz, list): | |
imgsz = [imgsz, imgsz] # expand if int/float | |
try: | |
# Use stride size for input tensor | |
stride = max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32 # max stride | |
im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format | |
flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1e9 * 2 # stride GFLOPs | |
return flops * imgsz[0] / stride * imgsz[1] / stride # imgsz GFLOPs | |
except Exception: | |
# Use actual image size for input tensor (i.e. required for RTDETR models) | |
im = torch.empty((1, p.shape[1], *imgsz), device=p.device) # input image in BCHW format | |
return thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1e9 * 2 # imgsz GFLOPs | |
except Exception: | |
return 0.0 | |
def get_flops_with_torch_profiler(model, imgsz=640): | |
"""Compute model FLOPs (thop package alternative, but 2-10x slower unfortunately).""" | |
if not TORCH_2_0: # torch profiler implemented in torch>=2.0 | |
return 0.0 | |
model = de_parallel(model) | |
p = next(model.parameters()) | |
if not isinstance(imgsz, list): | |
imgsz = [imgsz, imgsz] # expand if int/float | |
try: | |
# Use stride size for input tensor | |
stride = (max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32) * 2 # max stride | |
im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format | |
with torch.profiler.profile(with_flops=True) as prof: | |
model(im) | |
flops = sum(x.flops for x in prof.key_averages()) / 1e9 | |
flops = flops * imgsz[0] / stride * imgsz[1] / stride # 640x640 GFLOPs | |
except Exception: | |
# Use actual image size for input tensor (i.e. required for RTDETR models) | |
im = torch.empty((1, p.shape[1], *imgsz), device=p.device) # input image in BCHW format | |
with torch.profiler.profile(with_flops=True) as prof: | |
model(im) | |
flops = sum(x.flops for x in prof.key_averages()) / 1e9 | |
return flops | |
def initialize_weights(model): | |
"""Initialize model weights to random values.""" | |
for m in model.modules(): | |
t = type(m) | |
if t is nn.Conv2d: | |
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') | |
elif t is nn.BatchNorm2d: | |
m.eps = 1e-3 | |
m.momentum = 0.03 | |
elif t in {nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU}: | |
m.inplace = True | |
def scale_img(img, ratio=1.0, same_shape=False, gs=32): | |
"""Scales and pads an image tensor, optionally maintaining aspect ratio and padding to gs multiple.""" | |
if ratio == 1.0: | |
return img | |
h, w = img.shape[2:] | |
s = (int(h * ratio), int(w * ratio)) # new size | |
img = F.interpolate(img, size=s, mode="bilinear", align_corners=False) # resize | |
if not same_shape: # pad/crop img | |
h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) | |
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean | |
def copy_attr(a, b, include=(), exclude=()): | |
"""Copies attributes from object 'b' to object 'a', with options to include/exclude certain attributes.""" | |
for k, v in b.__dict__.items(): | |
if (len(include) and k not in include) or k.startswith("_") or k in exclude: | |
continue | |
else: | |
setattr(a, k, v) | |
def get_latest_opset(): | |
"""Return the second-most recent ONNX opset version supported by this version of PyTorch, adjusted for maturity.""" | |
if TORCH_1_13: | |
# If the PyTorch>=1.13, dynamically compute the latest opset minus one using 'symbolic_opset' | |
return max(int(k[14:]) for k in vars(torch.onnx) if "symbolic_opset" in k) - 1 | |
# Otherwise for PyTorch<=1.12 return the corresponding predefined opset | |
version = torch.onnx.producer_version.rsplit(".", 1)[0] # i.e. '2.3' | |
return {"1.12": 15, "1.11": 14, "1.10": 13, "1.9": 12, "1.8": 12}.get(version, 12) | |
def intersect_dicts(da, db, exclude=()): | |
"""Returns a dictionary of intersecting keys with matching shapes, excluding 'exclude' keys, using da values.""" | |
return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} | |
def is_parallel(model): | |
"""Returns True if model is of type DP or DDP.""" | |
return isinstance(model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)) | |
def de_parallel(model): | |
"""De-parallelize a model: returns single-GPU model if model is of type DP or DDP.""" | |
return model.module if is_parallel(model) else model | |
def one_cycle(y1=0.0, y2=1.0, steps=100): | |
"""Returns a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf.""" | |
return lambda x: max((1 - math.cos(x * math.pi / steps)) / 2, 0) * (y2 - y1) + y1 | |
def init_seeds(seed=0, deterministic=False): | |
"""Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html.""" | |
random.seed(seed) | |
np.random.seed(seed) | |
torch.manual_seed(seed) | |
torch.cuda.manual_seed(seed) | |
torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe | |
# torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 | |
if deterministic: | |
if TORCH_2_0: | |
torch.use_deterministic_algorithms(True, warn_only=True) # warn if deterministic is not possible | |
torch.backends.cudnn.deterministic = True | |
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" | |
os.environ["PYTHONHASHSEED"] = str(seed) | |
else: | |
LOGGER.warning("WARNING โ ๏ธ Upgrade to torch>=2.0.0 for deterministic training.") | |
else: | |
torch.use_deterministic_algorithms(False) | |
torch.backends.cudnn.deterministic = False | |
class ModelEMA: | |
""" | |
Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models. Keeps a moving | |
average of everything in the model state_dict (parameters and buffers). | |
For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage | |
To disable EMA set the `enabled` attribute to `False`. | |
""" | |
def __init__(self, model, decay=0.9999, tau=2000, updates=0): | |
"""Initialize EMA for 'model' with given arguments.""" | |
self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA | |
self.updates = updates # number of EMA updates | |
self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) | |
for p in self.ema.parameters(): | |
p.requires_grad_(False) | |
self.enabled = True | |
def update(self, model): | |
"""Update EMA parameters.""" | |
if self.enabled: | |
self.updates += 1 | |
d = self.decay(self.updates) | |
msd = de_parallel(model).state_dict() # model state_dict | |
for k, v in self.ema.state_dict().items(): | |
if v.dtype.is_floating_point: # true for FP16 and FP32 | |
v *= d | |
v += (1 - d) * msd[k].detach() | |
# assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype}, model {msd[k].dtype}' | |
def update_attr(self, model, include=(), exclude=("process_group", "reducer")): | |
"""Updates attributes and saves stripped model with optimizer removed.""" | |
if self.enabled: | |
copy_attr(self.ema, model, include, exclude) | |
def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "", updates: dict = None) -> dict: | |
""" | |
Strip optimizer from 'f' to finalize training, optionally save as 's'. | |
Args: | |
f (str): file path to model to strip the optimizer from. Default is 'best.pt'. | |
s (str): file path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten. | |
updates (dict): a dictionary of updates to overlay onto the checkpoint before saving. | |
Returns: | |
(dict): The combined checkpoint dictionary. | |
Example: | |
```python | |
from pathlib import Path | |
from ultralytics.utils.torch_utils import strip_optimizer | |
for f in Path("path/to/model/checkpoints").rglob("*.pt"): | |
strip_optimizer(f) | |
``` | |
Note: | |
Use `ultralytics.nn.torch_safe_load` for missing modules with `x = torch_safe_load(f)[0]` | |
""" | |
try: | |
x = torch.load(f, map_location=torch.device("cpu")) | |
assert isinstance(x, dict), "checkpoint is not a Python dictionary" | |
assert "model" in x, "'model' missing from checkpoint" | |
except Exception as e: | |
LOGGER.warning(f"WARNING โ ๏ธ Skipping {f}, not a valid Ultralytics model: {e}") | |
return {} | |
metadata = { | |
"date": datetime.now().isoformat(), | |
"version": __version__, | |
"license": "AGPL-3.0 License (https://ultralytics.com/license)", | |
"docs": "https://docs.ultralytics.com", | |
} | |
# Update model | |
if x.get("ema"): | |
x["model"] = x["ema"] # replace model with EMA | |
if hasattr(x["model"], "args"): | |
x["model"].args = dict(x["model"].args) # convert from IterableSimpleNamespace to dict | |
if hasattr(x["model"], "criterion"): | |
x["model"].criterion = None # strip loss criterion | |
x["model"].half() # to FP16 | |
for p in x["model"].parameters(): | |
p.requires_grad = False | |
# Update other keys | |
args = {**DEFAULT_CFG_DICT, **x.get("train_args", {})} # combine args | |
for k in "optimizer", "best_fitness", "ema", "updates": # keys | |
x[k] = None | |
x["epoch"] = -1 | |
x["train_args"] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # strip non-default keys | |
# x['model'].args = x['train_args'] | |
# Save | |
combined = {**metadata, **x, **(updates or {})} | |
torch.save(combined, s or f) # combine dicts (prefer to the right) | |
mb = os.path.getsize(s or f) / 1e6 # file size | |
LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") | |
return combined | |
def convert_optimizer_state_dict_to_fp16(state_dict): | |
""" | |
Converts the state_dict of a given optimizer to FP16, focusing on the 'state' key for tensor conversions. | |
This method aims to reduce storage size without altering 'param_groups' as they contain non-tensor data. | |
""" | |
for state in state_dict["state"].values(): | |
for k, v in state.items(): | |
if k != "step" and isinstance(v, torch.Tensor) and v.dtype is torch.float32: | |
state[k] = v.half() | |
return state_dict | |
def cuda_memory_usage(device=None): | |
""" | |
Monitor and manage CUDA memory usage. | |
This function checks if CUDA is available and, if so, empties the CUDA cache to free up unused memory. | |
It then yields a dictionary containing memory usage information, which can be updated by the caller. | |
Finally, it updates the dictionary with the amount of memory reserved by CUDA on the specified device. | |
Args: | |
device (torch.device, optional): The CUDA device to query memory usage for. Defaults to None. | |
Yields: | |
(dict): A dictionary with a key 'memory' initialized to 0, which will be updated with the reserved memory. | |
""" | |
cuda_info = dict(memory=0) | |
if torch.cuda.is_available(): | |
torch.cuda.empty_cache() | |
try: | |
yield cuda_info | |
finally: | |
cuda_info["memory"] = torch.cuda.memory_reserved(device) | |
else: | |
yield cuda_info | |
def profile(input, ops, n=10, device=None, max_num_obj=0): | |
""" | |
Ultralytics speed, memory and FLOPs profiler. | |
Example: | |
```python | |
from ultralytics.utils.torch_utils import profile | |
input = torch.randn(16, 3, 640, 640) | |
m1 = lambda x: x * torch.sigmoid(x) | |
m2 = nn.SiLU() | |
profile(input, [m1, m2], n=100) # profile over 100 iterations | |
``` | |
""" | |
results = [] | |
if not isinstance(device, torch.device): | |
device = select_device(device) | |
LOGGER.info( | |
f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" | |
f"{'input':>24s}{'output':>24s}" | |
) | |
gc.collect() # attempt to free unused memory | |
torch.cuda.empty_cache() | |
for x in input if isinstance(input, list) else [input]: | |
x = x.to(device) | |
x.requires_grad = True | |
for m in ops if isinstance(ops, list) else [ops]: | |
m = m.to(device) if hasattr(m, "to") else m # device | |
m = m.half() if hasattr(m, "half") and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m | |
tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward | |
try: | |
flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1e9 * 2 # GFLOPs | |
except Exception: | |
flops = 0 | |
try: | |
mem = 0 | |
for _ in range(n): | |
with cuda_memory_usage(device) as cuda_info: | |
t[0] = time_sync() | |
y = m(x) | |
t[1] = time_sync() | |
try: | |
(sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() | |
t[2] = time_sync() | |
except Exception: # no backward method | |
# print(e) # for debug | |
t[2] = float("nan") | |
mem += cuda_info["memory"] / 1e9 # (GB) | |
tf += (t[1] - t[0]) * 1000 / n # ms per op forward | |
tb += (t[2] - t[1]) * 1000 / n # ms per op backward | |
if max_num_obj: # simulate training with predictions per image grid (for AutoBatch) | |
with cuda_memory_usage(device) as cuda_info: | |
torch.randn( | |
x.shape[0], | |
max_num_obj, | |
int(sum((x.shape[-1] / s) * (x.shape[-2] / s) for s in m.stride.tolist())), | |
device=device, | |
dtype=torch.float32, | |
) | |
mem += cuda_info["memory"] / 1e9 # (GB) | |
s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else "list" for x in (x, y)) # shapes | |
p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters | |
LOGGER.info(f"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}") | |
results.append([p, flops, mem, tf, tb, s_in, s_out]) | |
except Exception as e: | |
LOGGER.info(e) | |
results.append(None) | |
finally: | |
gc.collect() # attempt to free unused memory | |
torch.cuda.empty_cache() | |
return results | |
class EarlyStopping: | |
"""Early stopping class that stops training when a specified number of epochs have passed without improvement.""" | |
def __init__(self, patience=50): | |
""" | |
Initialize early stopping object. | |
Args: | |
patience (int, optional): Number of epochs to wait after fitness stops improving before stopping. | |
""" | |
self.best_fitness = 0.0 # i.e. mAP | |
self.best_epoch = 0 | |
self.patience = patience or float("inf") # epochs to wait after fitness stops improving to stop | |
self.possible_stop = False # possible stop may occur next epoch | |
def __call__(self, epoch, fitness): | |
""" | |
Check whether to stop training. | |
Args: | |
epoch (int): Current epoch of training | |
fitness (float): Fitness value of current epoch | |
Returns: | |
(bool): True if training should stop, False otherwise | |
""" | |
if fitness is None: # check if fitness=None (happens when val=False) | |
return False | |
if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training | |
self.best_epoch = epoch | |
self.best_fitness = fitness | |
delta = epoch - self.best_epoch # epochs without improvement | |
self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch | |
stop = delta >= self.patience # stop training if patience exceeded | |
if stop: | |
prefix = colorstr("EarlyStopping: ") | |
LOGGER.info( | |
f"{prefix}Training stopped early as no improvement observed in last {self.patience} epochs. " | |
f"Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n" | |
f"To update EarlyStopping(patience={self.patience}) pass a new patience value, " | |
f"i.e. `patience=300` or use `patience=0` to disable EarlyStopping." | |
) | |
return stop | |
class FXModel(nn.Module): | |
""" | |
A custom model class for torch.fx compatibility. | |
This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph manipulation. | |
It copies attributes from an existing model and explicitly sets the model attribute to ensure proper copying. | |
Args: | |
model (torch.nn.Module): The original model to wrap for torch.fx compatibility. | |
""" | |
def __init__(self, model): | |
""" | |
Initialize the FXModel. | |
Args: | |
model (torch.nn.Module): The original model to wrap for torch.fx compatibility. | |
""" | |
super().__init__() | |
copy_attr(self, model) | |
# Explicitly set `model` since `copy_attr` somehow does not copy it. | |
self.model = model.model | |
def forward(self, x): | |
""" | |
Forward pass through the model. | |
This method performs the forward pass through the model, handling the dependencies between layers and saving intermediate outputs. | |
Args: | |
x (torch.Tensor): The input tensor to the model. | |
Returns: | |
(torch.Tensor): The output tensor from the model. | |
""" | |
y = [] # outputs | |
for m in self.model: | |
if m.f != -1: # if not from previous layer | |
# from earlier layers | |
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] | |
x = m(x) # run | |
y.append(x) # save output | |
return x | |