diff --git a/pytorch-image-models/timm/layers/__pycache__/activations_me.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/activations_me.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58e416a46171f9302a73fbf3e20e57321072f394 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/activations_me.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/attention_pool2d.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/attention_pool2d.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6867a3fecfbd65019327cee6d51c5b2c299a220 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/attention_pool2d.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/blur_pool.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/blur_pool.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60434e9754e1edb14cfac724cb02016088a43986 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/blur_pool.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/conv_bn_act.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/conv_bn_act.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f06b56c7cfbc7e3dddd4bb2ce776f5e5f87d0f6d Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/conv_bn_act.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/create_attn.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/create_attn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ce7dcf3e1ee9ad9e02f8a43827d25deaa937b17 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/create_attn.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/drop.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/drop.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f249a67bba88e724b0c0ee4231d1df64f26a233 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/drop.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/eca.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/eca.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b96e2251bf942291fc3eaaa5edb6e9e96310881e Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/eca.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/gather_excite.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/gather_excite.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31513a70391cf565d2730270a4d49aeb8eccd192 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/gather_excite.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/grid.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/grid.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..499774bb8b219a4b011a21d750ba44d049ed53f1 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/grid.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/halo_attn.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/halo_attn.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..179cc214d587335bb7e4604d2b700a2e33a0ce7e Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/halo_attn.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/inplace_abn.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/inplace_abn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04765a9d1c1bc90c95067449d1a1317f5a7de706 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/inplace_abn.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/lambda_layer.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/lambda_layer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2ded3acdb79f1491a32c5019b046cbb18e0c54d Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/lambda_layer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/layer_scale.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/layer_scale.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b500298b1e046909fd662997ea36a3be6e71bd9 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/layer_scale.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/non_local_attn.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/non_local_attn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3940460964502f5a6cd21e8708f9a661c50585a6 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/non_local_attn.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/pool2d_same.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/pool2d_same.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bf2d58e3c9f27b504f8c51170a401cd7508ed57 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/pool2d_same.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/pos_embed_sincos.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/pos_embed_sincos.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7005f9cfb259c41096dee60c5bf2a284c44d9d7 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/pos_embed_sincos.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/selective_kernel.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/selective_kernel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e00018c626eede834dd2232b38ea20bd0c2fb63 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/selective_kernel.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/split_attn.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/split_attn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30c3475821a576449bb3dc22943e72480d80fc86 Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/split_attn.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/squeeze_excite.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/squeeze_excite.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ba86c58aaed1b3b19576efbfb4c9372541850f4 Binary files /dev/null and 
b/pytorch-image-models/timm/layers/__pycache__/squeeze_excite.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/layers/__pycache__/trace_utils.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/trace_utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5182fc524d88ab8dd9c510d5d81d63bf0f432a99
Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/trace_utils.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/layers/__pycache__/typing.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/typing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1242897079aca3314dccc5aaed2e23664a8a988
Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/typing.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/layers/__pycache__/weight_init.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/weight_init.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab071593160f59e61407aa78906bac2bef5211af
Binary files /dev/null and b/pytorch-image-models/timm/layers/__pycache__/weight_init.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/layers/create_norm.py b/pytorch-image-models/timm/layers/create_norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..74e893d8fc57a894d8c8b2014b6a6cd3568f8fb1
--- /dev/null
+++ b/pytorch-image-models/timm/layers/create_norm.py
@@ -0,0 +1,58 @@
+""" Norm Layer Factory
+
+Create norm modules by string (to mirror create_act and create_norm_act fns)
+
+Copyright 2022 Ross Wightman
+"""
+import functools
+import types
+from typing import Type
+
+import torch.nn as nn
+
+from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d
+from torchvision.ops.misc import FrozenBatchNorm2d
+
+_NORM_MAP = dict(
+    batchnorm=nn.BatchNorm2d,
+    batchnorm2d=nn.BatchNorm2d,
+    batchnorm1d=nn.BatchNorm1d,
+    groupnorm=GroupNorm,
+    groupnorm1=GroupNorm1,
+    layernorm=LayerNorm,
+    layernorm2d=LayerNorm2d,
+    rmsnorm=RmsNorm,
+    rmsnorm2d=RmsNorm2d,
+    frozenbatchnorm2d=FrozenBatchNorm2d,
+)
+_NORM_TYPES = {m for n, m in _NORM_MAP.items()}
+
+
+def create_norm_layer(layer_name, num_features, **kwargs):
+    layer = get_norm_layer(layer_name)
+    layer_instance = layer(num_features, **kwargs)
+    return layer_instance
+
+
+def get_norm_layer(norm_layer):
+    if norm_layer is None:
+        return None
+    assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
+    norm_kwargs = {}
+
+    # unbind partial fn, so args can be rebound later
+    if isinstance(norm_layer, functools.partial):
+        norm_kwargs.update(norm_layer.keywords)
+        norm_layer = norm_layer.func
+
+    if isinstance(norm_layer, str):
+        if not norm_layer:
+            return None
+        layer_name = norm_layer.replace('_', '').lower()
+        norm_layer = _NORM_MAP[layer_name]
+    else:
+        norm_layer = norm_layer
+
+    if norm_kwargs:
+        norm_layer = functools.partial(norm_layer, **norm_kwargs)  # bind/rebind args
+    return norm_layer
diff --git a/pytorch-image-models/timm/layers/create_norm_act.py b/pytorch-image-models/timm/layers/create_norm_act.py
new file mode 100644
index 0000000000000000000000000000000000000000..f053a7e0aedc97a7450cf9d9ea22c9ebf59dd88a
--- /dev/null
+++ b/pytorch-image-models/timm/layers/create_norm_act.py
@@ -0,0 +1,95 @@
+""" NormAct (Normalization + Activation Layer) Factory
+
+Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
+instances in models.
Where these are used it will be possible to swap separate BN + act layers with +combined modules like IABN or EvoNorms. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import types +import functools + +from .evo_norm import * +from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d +from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d +from .inplace_abn import InplaceAbn + +_NORM_ACT_MAP = dict( + batchnorm=BatchNormAct2d, + batchnorm2d=BatchNormAct2d, + groupnorm=GroupNormAct, + groupnorm1=functools.partial(GroupNormAct, num_groups=1), + layernorm=LayerNormAct, + layernorm2d=LayerNormAct2d, + evonormb0=EvoNorm2dB0, + evonormb1=EvoNorm2dB1, + evonormb2=EvoNorm2dB2, + evonorms0=EvoNorm2dS0, + evonorms0a=EvoNorm2dS0a, + evonorms1=EvoNorm2dS1, + evonorms1a=EvoNorm2dS1a, + evonorms2=EvoNorm2dS2, + evonorms2a=EvoNorm2dS2a, + frn=FilterResponseNormAct2d, + frntlu=FilterResponseNormTlu2d, + inplaceabn=InplaceAbn, + iabn=InplaceAbn, +) +_NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()} +# has act_layer arg to define act type +_NORM_ACT_REQUIRES_ARG = { + BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} + + +def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): + layer = get_norm_act_layer(layer_name, act_layer=act_layer) + layer_instance = layer(num_features, apply_act=apply_act, **kwargs) + if jit: + layer_instance = torch.jit.script(layer_instance) + return layer_instance + + +def get_norm_act_layer(norm_layer, act_layer=None): + if norm_layer is None: + return None + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) + norm_act_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_act_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + if not norm_layer: + return None + layer_name = norm_layer.replace('_', '').lower().split('-')[0] + norm_act_layer = _NORM_ACT_MAP[layer_name] + elif norm_layer in _NORM_ACT_TYPES: + norm_act_layer = norm_layer + elif isinstance(norm_layer, types.FunctionType): + # if function type, must be a lambda/fn that creates a norm_act layer + norm_act_layer = norm_layer + else: + type_name = norm_layer.__name__.lower() + if type_name.startswith('batchnorm'): + norm_act_layer = BatchNormAct2d + elif type_name.startswith('groupnorm'): + norm_act_layer = GroupNormAct + elif type_name.startswith('groupnorm1'): + norm_act_layer = functools.partial(GroupNormAct, num_groups=1) + elif type_name.startswith('layernorm2d'): + norm_act_layer = LayerNormAct2d + elif type_name.startswith('layernorm'): + norm_act_layer = LayerNormAct + else: + assert False, f"No equivalent norm_act layer for {type_name}" + + if norm_act_layer in _NORM_ACT_REQUIRES_ARG: + # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. 
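+        #   e.g. (illustrative) get_norm_act_layer('batchnorm', act_layer=nn.ReLU) returns
+        #   functools.partial(BatchNormAct2d, act_layer=nn.ReLU), ready to be called with num_features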
+ # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/pytorch-image-models/timm/layers/drop.py b/pytorch-image-models/timm/layers/drop.py new file mode 100644 index 0000000000000000000000000000000000000000..289245f5adab275ffc38de3b01fdb23f9e491593 --- /dev/null +++ b/pytorch-image-models/timm/layers/drop.py @@ -0,0 +1,182 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. + +Papers: +DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) + +Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) + +Code: +DropBlock impl inspired by two Tensorflow impl that I liked: + - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 + - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .grid import ndgrid + + +def drop_block_2d( + x, + drop_prob: float = 0.1, + block_size: int = 7, + gamma_scale: float = 1.0, + with_noise: bool = False, + inplace: bool = False, + batchwise: bool = False +): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. This layer has been tested on a few training + runs with success, but needs further validation and possibly optimization for lower runtime impact. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + # seed_drop_rate, the gamma parameter + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + # Forces the block to be inside the feature map. + w_i, h_i = ndgrid(torch.arange(W, device=x.device), torch.arange(H, device=x.device)) + valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ + ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) + valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) + + if batchwise: + # one mask for whole batch, quite a bit faster + uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) + else: + uniform_noise = torch.rand_like(x) + block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) + block_mask = -F.max_pool2d( + -block_mask, + kernel_size=clipped_block_size, # block_size, + stride=1, + padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) + else: + x = x * block_mask + normal_noise * (1 - block_mask) + else: + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +def drop_block_fast_2d( + x: torch.Tensor, + drop_prob: float = 0.1, + block_size: int = 7, + gamma_scale: float = 1.0, + with_noise: bool = False, + inplace: bool = False, +): + """ DropBlock. 
See https://arxiv.org/pdf/1810.12890.pdf
+
+    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
+    block mask at edges.
+    """
+    B, C, H, W = x.shape
+    total_size = W * H
+    clipped_block_size = min(block_size, min(W, H))
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
+            (W - block_size + 1) * (H - block_size + 1))
+
+    block_mask = torch.empty_like(x).bernoulli_(gamma)
+    block_mask = F.max_pool2d(
+        block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
+
+    if with_noise:
+        normal_noise = torch.empty_like(x).normal_()
+        if inplace:
+            x.mul_(1. - block_mask).add_(normal_noise * block_mask)
+        else:
+            x = x * (1. - block_mask) + normal_noise * block_mask
+    else:
+        block_mask = 1 - block_mask
+        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
+        if inplace:
+            x.mul_(block_mask * normalize_scale)
+        else:
+            x = x * block_mask * normalize_scale
+    return x
+
+
+class DropBlock2d(nn.Module):
+    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
+    """
+
+    def __init__(
+            self,
+            drop_prob: float = 0.1,
+            block_size: int = 7,
+            gamma_scale: float = 1.0,
+            with_noise: bool = False,
+            inplace: bool = False,
+            batchwise: bool = False,
+            fast: bool = True):
+        super(DropBlock2d, self).__init__()
+        self.drop_prob = drop_prob
+        self.gamma_scale = gamma_scale
+        self.block_size = block_size
+        self.with_noise = with_noise
+        self.inplace = inplace
+        self.batchwise = batchwise
+        self.fast = fast  # FIXME finish comparisons of fast vs not
+
+    def forward(self, x):
+        if not self.training or not self.drop_prob:
+            return x
+        if self.fast:
+            return drop_block_fast_2d(
+                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
+        else:
+            return drop_block_2d(
+                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
+
+
+def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+    This is the same as the DropConnect impl I created for EfficientNet, etc. networks; however,
+    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
+    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
+    'survival rate' as the argument.
+
+    """
+    if drop_prob == 0. or not training:
+        return x
+    keep_prob = 1 - drop_prob
+    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
+    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+    if keep_prob > 0.0 and scale_by_keep:
+        random_tensor.div_(keep_prob)
+    return x * random_tensor
+
+
+class DropPath(nn.Module):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
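+    With drop_prob=0.1, for example, each sample's residual path is zeroed with probability 0.1 during
+    training, and surviving samples are rescaled by 1 / (1 - drop_prob) when scale_by_keep=True.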
+ """ + def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f'drop_prob={round(self.drop_prob,3):0.3f}' diff --git a/pytorch-image-models/timm/layers/eca.py b/pytorch-image-models/timm/layers/eca.py new file mode 100644 index 0000000000000000000000000000000000000000..e29be6ac3c95bb61229cdcdd659ec89d541f1a53 --- /dev/null +++ b/pytorch-image-models/timm/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+    def __init__(
+            self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
+            rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
+        super(EcaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        assert kernel_size % 2 == 1
+        padding = (kernel_size - 1) // 2
+        if use_mlp:
+            # NOTE 'mlp' mode is a timm experiment, not in paper
+            assert channels is not None
+            if rd_channels is None:
+                rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
+            act_layer = act_layer or nn.ReLU
+            self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
+            self.act = create_act_layer(act_layer)
+            self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
+        else:
+            self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
+            self.act = None
+            self.conv2 = None
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)  # view for 1d conv
+        y = self.conv(y)
+        if self.conv2 is not None:
+            y = self.act(y)
+            y = self.conv2(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+EfficientChannelAttn = EcaModule  # alias
+
+
+class CecaModule(nn.Module):
+    """Constructs a circular ECA module.
+
+    ECA module where the conv uses circular padding rather than zero padding.
+    Unlike the spatial dimension, the channels do not have inherent ordering nor
+    locality. Although this module, in essence, applies such an assumption, it is unnecessary
+    to limit the channels on either "edge" from being circularly adapted to each other.
+    This will fundamentally increase connectivity and possibly increase performance metrics
+    (accuracy, robustness), without significantly impacting resource metrics
+    (parameter size, throughput, latency, etc.)
+
+    Args:
+        channels: Number of channels of the input feature map for use in adaptive kernel sizes
+            for actual calculations according to channel.
+        gamma, beta: when channel is given parameters of mapping function
+            refer to original paper https://arxiv.org/pdf/1910.03151.pdf
+            (default=None. if channel size not given, use k_size given for kernel size.)
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+
+    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
+        super(CecaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        has_act = act_layer is not None
+        assert kernel_size % 2 == 1
+
+        # PyTorch circular padding mode is buggy as of pytorch 1.4
+        # see https://github.com/pytorch/pytorch/pull/17240
+        # implement manual circular padding
+        self.padding = (kernel_size - 1) // 2
+        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)
+        # Manually implement circular padding, F.pad does not seem to be bugged
+        y = F.pad(y, (self.padding, self.padding), mode='circular')
+        y = self.conv(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+CircularEfficientChannelAttn = CecaModule
diff --git a/pytorch-image-models/timm/layers/grid.py b/pytorch-image-models/timm/layers/grid.py
new file mode 100644
index 0000000000000000000000000000000000000000..f760d761fd8188fe9d979027c988d6ce8ec90169
--- /dev/null
+++ b/pytorch-image-models/timm/layers/grid.py
@@ -0,0 +1,49 @@
+from typing import Tuple
+
+import torch
+
+
+def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]:
+    """generate N-D grid in dimension order.
+
+    The ndgrid function is like meshgrid except that the order of the first two input arguments is switched.
+
+    That is, the statement
+    [X1,X2,X3] = ndgrid(x1,x2,x3)
+
+    produces the same result as
+
+    [X2,X1,X3] = meshgrid(x2,x1,x3)
+
+    This naming is based on MATLAB; the purpose is to avoid confusion due to torch's change to make
+    torch.meshgrid behaviour move from matching ndgrid ('ij') indexing to numpy meshgrid defaults of ('xy').
+
+    """
+    try:
+        return torch.meshgrid(*tensors, indexing='ij')
+    except TypeError:
+        # old PyTorch < 1.10 will follow this path as it does not have indexing arg,
+        # the old behaviour of meshgrid was 'ij'
+        return torch.meshgrid(*tensors)
+
+
+def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]:
+    """generate N-D grid in spatial dim order.
+
+    The meshgrid function is similar to ndgrid except that the order of the
+    first two input and output arguments is switched.
+
+    That is, the statement
+
+    [X,Y,Z] = meshgrid(x,y,z)
+    produces the same result as
+
+    [Y,X,Z] = ndgrid(y,x,z)
+    Because of this, meshgrid is better suited to problems in two- or three-dimensional Cartesian space,
+    while ndgrid is better suited to multidimensional problems that aren't spatially based.
+    """
+
+    # NOTE: this will throw in PyTorch < 1.10 as meshgrid did not support indexing arg or have
+    #  capability of generating grid in xy order before then.
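+    #  e.g. (illustrative) meshgrid(torch.arange(3), torch.arange(2)) returns two (2, 3) tensors,
+    #  while ndgrid(torch.arange(3), torch.arange(2)) returns two (3, 2) tensors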
+ return torch.meshgrid(*tensors, indexing='xy') + diff --git a/pytorch-image-models/timm/layers/grn.py b/pytorch-image-models/timm/layers/grn.py new file mode 100644 index 0000000000000000000000000000000000000000..ae71e013fc97bbdb4bcfcc522b9a9b36920b4efa --- /dev/null +++ b/pytorch-image-models/timm/layers/grn.py @@ -0,0 +1,39 @@ +""" Global Response Normalization Module + +Based on the GRN layer presented in +`ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 + +This implementation +* works for both NCHW and NHWC tensor layouts +* uses affine param names matching existing torch norm layers +* slightly improves eager mode performance via fused addcmul + +Hacked together by / Copyright 2023 Ross Wightman +""" + +import torch +from torch import nn as nn + + +class GlobalResponseNorm(nn.Module): + """ Global Response Normalization layer + """ + def __init__(self, dim, eps=1e-6, channels_last=True): + super().__init__() + self.eps = eps + if channels_last: + self.spatial_dim = (1, 2) + self.channel_dim = -1 + self.wb_shape = (1, 1, 1, -1) + else: + self.spatial_dim = (2, 3) + self.channel_dim = 1 + self.wb_shape = (1, -1, 1, 1) + + self.weight = nn.Parameter(torch.zeros(dim)) + self.bias = nn.Parameter(torch.zeros(dim)) + + def forward(self, x): + x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True) + x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps) + return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n) diff --git a/pytorch-image-models/timm/layers/helpers.py b/pytorch-image-models/timm/layers/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..b003f48d845761fbed4230b3af4092ae48bfe6b9 --- /dev/null +++ b/pytorch-image-models/timm/layers/helpers.py @@ -0,0 +1,43 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return tuple(x) + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < round_limit * v: + new_v += divisor + return new_v + + +def extend_tuple(x, n): + # pads a tuple to specified n by padding with last value + if not isinstance(x, (tuple, list)): + x = (x,) + else: + x = tuple(x) + pad_n = n - len(x) + if pad_n <= 0: + return x[:n] + return x + (x[-1],) * pad_n diff --git a/pytorch-image-models/timm/layers/interpolate.py b/pytorch-image-models/timm/layers/interpolate.py new file mode 100644 index 0000000000000000000000000000000000000000..adba9342ec03a9fd9ad3186f73133325892ad1d9 --- /dev/null +++ b/pytorch-image-models/timm/layers/interpolate.py @@ -0,0 +1,68 @@ +""" Interpolation helpers for timm layers + +RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations +Copyright Shane Barratt, Apache 2.0 license +""" +import torch +from itertools import product + + +class RegularGridInterpolator: + """ Interpolate data defined on a rectilinear grid with even or uneven spacing. 
+ Produces similar results to scipy RegularGridInterpolator or interp2d + in 'linear' mode. + + Taken from https://github.com/sbarratt/torch_interpolations + """ + + def __init__(self, points, values): + self.points = points + self.values = values + + assert isinstance(self.points, tuple) or isinstance(self.points, list) + assert isinstance(self.values, torch.Tensor) + + self.ms = list(self.values.shape) + self.n = len(self.points) + + assert len(self.ms) == self.n + + for i, p in enumerate(self.points): + assert isinstance(p, torch.Tensor) + assert p.shape[0] == self.values.shape[i] + + def __call__(self, points_to_interp): + assert self.points is not None + assert self.values is not None + + assert len(points_to_interp) == len(self.points) + K = points_to_interp[0].shape[0] + for x in points_to_interp: + assert x.shape[0] == K + + idxs = [] + dists = [] + overalls = [] + for p, x in zip(self.points, points_to_interp): + idx_right = torch.bucketize(x, p) + idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1 + idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1) + dist_left = x - p[idx_left] + dist_right = p[idx_right] - x + dist_left[dist_left < 0] = 0. + dist_right[dist_right < 0] = 0. + both_zero = (dist_left == 0) & (dist_right == 0) + dist_left[both_zero] = dist_right[both_zero] = 1. + + idxs.append((idx_left, idx_right)) + dists.append((dist_left, dist_right)) + overalls.append(dist_left + dist_right) + + numerator = 0. + for indexer in product([0, 1], repeat=self.n): + as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)] + bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)] + numerator += self.values[as_s] * \ + torch.prod(torch.stack(bs_s), dim=0) + denominator = torch.prod(torch.stack(overalls), dim=0) + return numerator / denominator diff --git a/pytorch-image-models/timm/layers/linear.py b/pytorch-image-models/timm/layers/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..38fe3380b067ea0b275c45ffd689afdeb4598f3c --- /dev/null +++ b/pytorch-image-models/timm/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. + """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/pytorch-image-models/timm/layers/median_pool.py b/pytorch-image-models/timm/layers/median_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..40bd71a7a3840aaebefd2af0a99605b845054cd7 --- /dev/null +++ b/pytorch-image-models/timm/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. 
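+    Each output element is the median over a kernel_size window of the (reflect-padded) input; with
+    same=True, TF-style 'SAME' padding is computed from the input size and stride instead of the padding arg.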
+ + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/pytorch-image-models/timm/loss/__init__.py b/pytorch-image-models/timm/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7f15f2f79673c962f68d6d4b06898e73ac1df6 --- /dev/null +++ b/pytorch-image-models/timm/loss/__init__.py @@ -0,0 +1,4 @@ +from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel +from .binary_cross_entropy import BinaryCrossEntropy +from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from .jsd import JsdCrossEntropy diff --git a/pytorch-image-models/timm/loss/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/loss/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6408509a5f297a0d0b3a3cebf81f78af0ad3b76f Binary files /dev/null and b/pytorch-image-models/timm/loss/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/loss/__pycache__/asymmetric_loss.cpython-39.pyc b/pytorch-image-models/timm/loss/__pycache__/asymmetric_loss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6b5c1fe5577138e182064217c14ffaff83412d4 Binary files /dev/null and b/pytorch-image-models/timm/loss/__pycache__/asymmetric_loss.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/loss/__pycache__/binary_cross_entropy.cpython-39.pyc b/pytorch-image-models/timm/loss/__pycache__/binary_cross_entropy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaafecddbe8c031ac813baaa86a5b77843ad46cd Binary files /dev/null and b/pytorch-image-models/timm/loss/__pycache__/binary_cross_entropy.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/loss/__pycache__/cross_entropy.cpython-39.pyc b/pytorch-image-models/timm/loss/__pycache__/cross_entropy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cd445e99a5a206fe1df551800a24fb9ca1f4e7a Binary files /dev/null and b/pytorch-image-models/timm/loss/__pycache__/cross_entropy.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/loss/__pycache__/jsd.cpython-39.pyc b/pytorch-image-models/timm/loss/__pycache__/jsd.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0dd859549d0641299a59e922c7dec8309092014 Binary files /dev/null and 
b/pytorch-image-models/timm/loss/__pycache__/jsd.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/loss/binary_cross_entropy.py b/pytorch-image-models/timm/loss/binary_cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..4e0d685b76ba80f31e6d2768c5d6c52733767fe8 --- /dev/null +++ b/pytorch-image-models/timm/loss/binary_cross_entropy.py @@ -0,0 +1,65 @@ +""" Binary Cross Entropy w/ a few extras + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BinaryCrossEntropy(nn.Module): + """ BCE with optional one-hot from dense targets, label smoothing, thresholding + NOTE for experiments comparing CE to BCE /w label smoothing, may remove + """ + def __init__( + self, + smoothing=0.1, + target_threshold: Optional[float] = None, + weight: Optional[torch.Tensor] = None, + reduction: str = 'mean', + sum_classes: bool = False, + pos_weight: Optional[Union[torch.Tensor, float]] = None, + ): + super(BinaryCrossEntropy, self).__init__() + assert 0. <= smoothing < 1.0 + if pos_weight is not None: + if not isinstance(pos_weight, torch.Tensor): + pos_weight = torch.tensor(pos_weight) + self.smoothing = smoothing + self.target_threshold = target_threshold + self.reduction = 'none' if sum_classes else reduction + self.sum_classes = sum_classes + self.register_buffer('weight', weight) + self.register_buffer('pos_weight', pos_weight) + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + batch_size = x.shape[0] + assert batch_size == target.shape[0] + + if target.shape != x.shape: + # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse + num_classes = x.shape[-1] + # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ + off_value = self.smoothing / num_classes + on_value = 1. - self.smoothing + off_value + target = target.long().view(-1, 1) + target = torch.full( + (batch_size, num_classes), + off_value, + device=x.device, dtype=x.dtype).scatter_(1, target, on_value) + + if self.target_threshold is not None: + # Make target 0, or 1 if threshold set + target = target.gt(self.target_threshold).to(dtype=target.dtype) + + loss = F.binary_cross_entropy_with_logits( + x, target, + self.weight, + pos_weight=self.pos_weight, + reduction=self.reduction, + ) + if self.sum_classes: + loss = loss.sum(-1).mean() + return loss diff --git a/pytorch-image-models/timm/loss/cross_entropy.py b/pytorch-image-models/timm/loss/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..85198107f3ad2a1ff775a677d77c03569ff5d04d --- /dev/null +++ b/pytorch-image-models/timm/loss/cross_entropy.py @@ -0,0 +1,36 @@ +""" Cross Entropy w/ smoothing or soft targets + +Hacked together by / Copyright 2021 Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LabelSmoothingCrossEntropy(nn.Module): + """ NLL loss with label smoothing. + """ + def __init__(self, smoothing=0.1): + super(LabelSmoothingCrossEntropy, self).__init__() + assert smoothing < 1.0 + self.smoothing = smoothing + self.confidence = 1. 
- smoothing + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + logprobs = F.log_softmax(x, dim=-1) + nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) + nll_loss = nll_loss.squeeze(1) + smooth_loss = -logprobs.mean(dim=-1) + loss = self.confidence * nll_loss + self.smoothing * smooth_loss + return loss.mean() + + +class SoftTargetCrossEntropy(nn.Module): + + def __init__(self): + super(SoftTargetCrossEntropy, self).__init__() + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) + return loss.mean() diff --git a/pytorch-image-models/timm/loss/jsd.py b/pytorch-image-models/timm/loss/jsd.py new file mode 100644 index 0000000000000000000000000000000000000000..dd64e156c23d27aa03817a587ae367e8175fc126 --- /dev/null +++ b/pytorch-image-models/timm/loss/jsd.py @@ -0,0 +1,39 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .cross_entropy import LabelSmoothingCrossEntropy + + +class JsdCrossEntropy(nn.Module): + """ Jensen-Shannon Divergence + Cross-Entropy Loss + + Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + + Hacked together by / Copyright 2020 Ross Wightman + """ + def __init__(self, num_splits=3, alpha=12, smoothing=0.1): + super().__init__() + self.num_splits = num_splits + self.alpha = alpha + if smoothing is not None and smoothing > 0: + self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) + else: + self.cross_entropy_loss = torch.nn.CrossEntropyLoss() + + def __call__(self, output, target): + split_size = output.shape[0] // self.num_splits + assert split_size * self.num_splits == output.shape[0] + logits_split = torch.split(output, split_size) + + # Cross-entropy is only computed on clean images + loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) + probs = [F.softmax(logits, dim=1) for logits in logits_split] + + # Clamp mixture distribution to avoid exploding KL divergence + logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log() + loss += self.alpha * sum([F.kl_div( + logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs) + return loss diff --git a/pytorch-image-models/timm/models/__init__.py b/pytorch-image-models/timm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c5b1984f20a7bf9a144c885df7e59304908c8276 --- /dev/null +++ b/pytorch-image-models/timm/models/__init__.py @@ -0,0 +1,100 @@ +from .beit import * +from .byoanet import * +from .byobnet import * +from .cait import * +from .coat import * +from .convit import * +from .convmixer import * +from .convnext import * +from .crossvit import * +from .cspnet import * +from .davit import * +from .deit import * +from .densenet import * +from .dla import * +from .dpn import * +from .edgenext import * +from .efficientformer import * +from .efficientformer_v2 import * +from .efficientnet import * +from .efficientvit_mit import * +from .efficientvit_msra import * +from .eva import * +from .fastvit import * +from .focalnet import * +from .gcvit import * +from .ghostnet import * +from .hardcorenas import * +from .hgnet import * +from .hiera import * +from .hieradet_sam2 import * +from .hrnet import * +from .inception_next import * +from .inception_resnet_v2 import * +from .inception_v3 import * +from 
.inception_v4 import * +from .levit import * +from .maxxvit import * +from .mambaout import * +from .metaformer import * +from .mlp_mixer import * +from .mobilenetv3 import * +from .mobilevit import * +from .mvitv2 import * +from .nasnet import * +from .nest import * +from .nextvit import * +from .nfnet import * +from .pit import * +from .pnasnet import * +from .pvt_v2 import * +from .rdnet import * +from .regnet import * +from .repghost import * +from .repvit import * +from .res2net import * +from .resnest import * +from .resnet import * +from .resnetv2 import * +from .rexnet import * +from .selecsls import * +from .senet import * +from .sequencer import * +from .sknet import * +from .swin_transformer import * +from .swin_transformer_v2 import * +from .swin_transformer_v2_cr import * +from .tiny_vit import * +from .tnt import * +from .tresnet import * +from .twins import * +from .vgg import * +from .visformer import * +from .vision_transformer import * +from .vision_transformer_hybrid import * +from .vision_transformer_relpos import * +from .vision_transformer_sam import * +from .vitamin import * +from .volo import * +from .vovnet import * +from .xception import * +from .xception_aligned import * +from .xcit import * + +from ._builder import build_model_with_cfg, load_pretrained, load_custom_pretrained, resolve_pretrained_cfg, \ + set_pretrained_download_progress, set_pretrained_check_hash +from ._factory import create_model, parse_model_name, safe_model_name +from ._features import FeatureInfo, FeatureHooks, FeatureHookNet, FeatureListNet, FeatureDictNet +from ._features_fx import FeatureGraphNet, GraphExtractNet, create_feature_extractor, get_graph_node_names, \ + register_notrace_module, is_notrace_module, get_notrace_modules, \ + register_notrace_function, is_notrace_function, get_notrace_functions +from ._helpers import clean_state_dict, load_state_dict, load_checkpoint, remap_state_dict, resume_checkpoint +from ._hub import load_model_config_from_hf, load_state_dict_from_hf, push_to_hf_hub +from ._manipulate import model_parameters, named_apply, named_modules, named_modules_with_params, \ + group_modules, group_parameters, checkpoint_seq, adapt_input_conv +from ._pretrained import PretrainedCfg, DefaultCfg, filter_pretrained_cfg +from ._prune import adapt_model_from_string +from ._registry import split_model_name_tag, get_arch_name, generate_default_cfgs, register_model, \ + register_model_deprecations, model_entrypoint, list_models, list_pretrained, get_deprecated_models, \ + is_model, list_modules, is_model_in_modules, is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value, \ + get_arch_pretrained_cfgs diff --git a/pytorch-image-models/timm/models/_builder.py b/pytorch-image-models/timm/models/_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..482d370a945cb25bd53400ad547103070db037e9 --- /dev/null +++ b/pytorch-image-models/timm/models/_builder.py @@ -0,0 +1,482 @@ +import dataclasses +import logging +import os +from copy import deepcopy +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from torch import nn as nn +from torch.hub import load_state_dict_from_url + +from timm.models._features import FeatureListNet, FeatureDictNet, FeatureHookNet, FeatureGetterNet +from timm.models._features_fx import FeatureGraphNet +from timm.models._helpers import load_state_dict +from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf,\ + load_custom_from_hf 
+from timm.models._manipulate import adapt_input_conv +from timm.models._pretrained import PretrainedCfg +from timm.models._prune import adapt_model_from_file +from timm.models._registry import get_pretrained_cfg + +_logger = logging.getLogger(__name__) + +# Global variables for rarely used pretrained checkpoint download progress and hash check. +# Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle. +_DOWNLOAD_PROGRESS = False +_CHECK_HASH = False +_USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0 + +__all__ = ['set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained', + 'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg'] + + +def _resolve_pretrained_source(pretrained_cfg): + cfg_source = pretrained_cfg.get('source', '') + pretrained_url = pretrained_cfg.get('url', None) + pretrained_file = pretrained_cfg.get('file', None) + pretrained_sd = pretrained_cfg.get('state_dict', None) + hf_hub_id = pretrained_cfg.get('hf_hub_id', None) + + # resolve where to load pretrained weights from + load_from = '' + pretrained_loc = '' + if cfg_source == 'hf-hub' and has_hf_hub(necessary=True): + # hf-hub specified as source via model identifier + load_from = 'hf-hub' + assert hf_hub_id + pretrained_loc = hf_hub_id + else: + # default source == timm or unspecified + if pretrained_sd: + # direct state_dict pass through is the highest priority + load_from = 'state_dict' + pretrained_loc = pretrained_sd + assert isinstance(pretrained_loc, dict) + elif pretrained_file: + # file load override is the second-highest priority if set + load_from = 'file' + pretrained_loc = pretrained_file + else: + old_cache_valid = False + if _USE_OLD_CACHE: + # prioritized old cached weights if exists and env var enabled + old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False + if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True): + # hf-hub available as alternate weight source in default_cfg + load_from = 'hf-hub' + pretrained_loc = hf_hub_id + elif pretrained_url: + load_from = 'url' + pretrained_loc = pretrained_url + + if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None): + # if a filename override is set, return tuple for location w/ (hub_id, filename) + pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename'] + return load_from, pretrained_loc + + +def set_pretrained_download_progress(enable=True): + """ Set download progress for pretrained weights on/off (globally). """ + global _DOWNLOAD_PROGRESS + _DOWNLOAD_PROGRESS = enable + + +def set_pretrained_check_hash(enable=True): + """ Set hash checking for pretrained weights on/off (globally). """ + global _CHECK_HASH + _CHECK_HASH = enable + + +def load_custom_pretrained( + model: nn.Module, + pretrained_cfg: Optional[Dict] = None, + load_fn: Optional[Callable] = None, + cache_dir: Optional[Union[str, Path]] = None, +): + r"""Loads a custom (read non .pth) weight file + + Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls + a passed in custom load fun, or the `load_pretrained` model member fn. + + If the object is already present in `model_dir`, it's deserialized and returned. + The default value of `model_dir` is ``/checkpoints`` where + `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`. 
+ + Args: + model: The instantiated model to load weights into + pretrained_cfg: Default pretrained model cfg + load_fn: An external standalone fn that loads weights into provided model, otherwise a fn named + 'load_pretrained' on the model will be called if it exists + cache_dir: Override model checkpoint cache dir for this load + """ + pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) + if not pretrained_cfg: + _logger.warning("Invalid pretrained config, cannot load weights.") + return + + load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg) + if not load_from: + _logger.warning("No pretrained weights exist for this model. Using random initialization.") + return + if load_from == 'hf-hub': + _logger.warning("Hugging Face hub not currently supported for custom load pretrained models.") + elif load_from == 'url': + pretrained_loc = download_cached_file( + pretrained_loc, + check_hash=_CHECK_HASH, + progress=_DOWNLOAD_PROGRESS, + cache_dir=cache_dir, + ) + + if load_fn is not None: + load_fn(model, pretrained_loc) + elif hasattr(model, 'load_pretrained'): + model.load_pretrained(pretrained_loc) + else: + _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") + + +def load_pretrained( + model: nn.Module, + pretrained_cfg: Optional[Dict] = None, + num_classes: int = 1000, + in_chans: int = 3, + filter_fn: Optional[Callable] = None, + strict: bool = True, + cache_dir: Optional[Union[str, Path]] = None, +): + """ Load pretrained checkpoint + + Args: + model: PyTorch module + pretrained_cfg: Configuration for pretrained weights / target dataset + num_classes: Number of classes for target model. Will adapt pretrained if different. + in_chans: Number of input chans for target model. Will adapt pretrained if different. + filter_fn: state_dict filter fn for load (takes state_dict, model as args) + strict: Strict load of checkpoint + cache_dir: Override model checkpoint cache dir for this load + """ + pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) + if not pretrained_cfg: + raise RuntimeError("Invalid pretrained config, cannot load weights. 
Use `pretrained=False` for random init.") + + load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg) + if load_from == 'state_dict': + _logger.info(f'Loading pretrained weights from state dict') + state_dict = pretrained_loc # pretrained_loc is the actual state dict for this override + elif load_from == 'file': + _logger.info(f'Loading pretrained weights from file ({pretrained_loc})') + if pretrained_cfg.get('custom_load', False): + model.load_pretrained(pretrained_loc) + return + else: + state_dict = load_state_dict(pretrained_loc) + elif load_from == 'url': + _logger.info(f'Loading pretrained weights from url ({pretrained_loc})') + if pretrained_cfg.get('custom_load', False): + pretrained_loc = download_cached_file( + pretrained_loc, + progress=_DOWNLOAD_PROGRESS, + check_hash=_CHECK_HASH, + cache_dir=cache_dir, + ) + model.load_pretrained(pretrained_loc) + return + else: + try: + state_dict = load_state_dict_from_url( + pretrained_loc, + map_location='cpu', + progress=_DOWNLOAD_PROGRESS, + check_hash=_CHECK_HASH, + weights_only=True, + model_dir=cache_dir, + ) + except TypeError: + state_dict = load_state_dict_from_url( + pretrained_loc, + map_location='cpu', + progress=_DOWNLOAD_PROGRESS, + check_hash=_CHECK_HASH, + model_dir=cache_dir, + ) + elif load_from == 'hf-hub': + _logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})') + if isinstance(pretrained_loc, (list, tuple)): + custom_load = pretrained_cfg.get('custom_load', False) + if isinstance(custom_load, str) and custom_load == 'hf': + load_custom_from_hf(*pretrained_loc, model, cache_dir=cache_dir) + return + else: + state_dict = load_state_dict_from_hf(*pretrained_loc, cache_dir=cache_dir) + else: + state_dict = load_state_dict_from_hf(pretrained_loc, weights_only=True, cache_dir=cache_dir) + else: + model_name = pretrained_cfg.get('architecture', 'this model') + raise RuntimeError(f"No pretrained weights exist for {model_name}. 
Use `pretrained=False` for random init.") + + if filter_fn is not None: + try: + state_dict = filter_fn(state_dict, model) + except TypeError as e: + # for backwards compat with filter fn that take one arg + state_dict = filter_fn(state_dict) + + input_convs = pretrained_cfg.get('first_conv', None) + if input_convs is not None and in_chans != 3: + if isinstance(input_convs, str): + input_convs = (input_convs,) + for input_conv_name in input_convs: + weight_name = input_conv_name + '.weight' + try: + state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) + _logger.info( + f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') + except NotImplementedError as e: + del state_dict[weight_name] + strict = False + _logger.warning( + f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') + + classifiers = pretrained_cfg.get('classifier', None) + label_offset = pretrained_cfg.get('label_offset', 0) + if classifiers is not None: + if isinstance(classifiers, str): + classifiers = (classifiers,) + if num_classes != pretrained_cfg['num_classes']: + for classifier_name in classifiers: + # completely discard fully connected if model num_classes doesn't match pretrained weights + state_dict.pop(classifier_name + '.weight', None) + state_dict.pop(classifier_name + '.bias', None) + strict = False + elif label_offset > 0: + for classifier_name in classifiers: + # special case for pretrained weights with an extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] + + load_result = model.load_state_dict(state_dict, strict=strict) + if load_result.missing_keys: + _logger.info( + f'Missing keys ({", ".join(load_result.missing_keys)}) discovered while loading pretrained weights.' + f' This is expected if model is being adapted.') + if load_result.unexpected_keys: + _logger.warning( + f'Unexpected keys ({", ".join(load_result.unexpected_keys)}) found while loading pretrained weights.' + f' This may be expected if model is being adapted.') + + +def pretrained_cfg_for_features(pretrained_cfg): + pretrained_cfg = deepcopy(pretrained_cfg) + # remove default pretrained cfg fields that don't have much relevance for feature backbone + to_remove = ('num_classes', 'classifier', 'global_pool') # add default final pool size? 
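+ # Illustrative sketch of what the loop below does (field values assumed, not taken from a real cfg):
+ # a classification-oriented cfg such as
+ #   {'architecture': 'resnet50', 'num_classes': 1000, 'classifier': 'fc',
+ #    'global_pool': 'avg', 'input_size': (3, 224, 224), 'pool_size': (7, 7)}
+ # comes back with 'num_classes', 'classifier' and 'global_pool' popped, keeping only the
+ # entries that still describe the feature backbone (input_size, pool_size, mean/std, etc.).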
+ for tr in to_remove: + pretrained_cfg.pop(tr, None) + return pretrained_cfg + + +def _filter_kwargs(kwargs, names): + if not kwargs or not names: + return + for n in names: + kwargs.pop(n, None) + + +def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter): + """ Update the default_cfg and kwargs before passing to model + + Args: + pretrained_cfg: input pretrained cfg (updated in-place) + kwargs: keyword args passed to model build fn (updated in-place) + kwargs_filter: keyword arg keys that must be removed before model __init__ + """ + # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs) + default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') + if pretrained_cfg.get('fixed_input_size', False): + # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size + default_kwarg_names += ('img_size',) + + for n in default_kwarg_names: + # for legacy reasons, model __init__args uses img_size + in_chans as separate args while + # pretrained_cfg has one input_size=(C, H ,W) entry + if n == 'img_size': + input_size = pretrained_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[-2:]) + elif n == 'in_chans': + input_size = pretrained_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[0]) + elif n == 'num_classes': + default_val = pretrained_cfg.get(n, None) + # if default is < 0, don't pass through to model + if default_val is not None and default_val >= 0: + kwargs.setdefault(n, pretrained_cfg[n]) + else: + default_val = pretrained_cfg.get(n, None) + if default_val is not None: + kwargs.setdefault(n, pretrained_cfg[n]) + + # Filter keyword args for task specific model variants (some 'features only' models, etc.) + _filter_kwargs(kwargs, names=kwargs_filter) + + +def resolve_pretrained_cfg( + variant: str, + pretrained_cfg: Optional[Union[str, Dict[str, Any]]] = None, + pretrained_cfg_overlay: Optional[Dict[str, Any]] = None, +) -> PretrainedCfg: + model_with_tag = variant + pretrained_tag = None + if pretrained_cfg: + if isinstance(pretrained_cfg, dict): + # pretrained_cfg dict passed as arg, validate by converting to PretrainedCfg + pretrained_cfg = PretrainedCfg(**pretrained_cfg) + elif isinstance(pretrained_cfg, str): + pretrained_tag = pretrained_cfg + pretrained_cfg = None + + # fallback to looking up pretrained cfg in model registry by variant identifier + if not pretrained_cfg: + if pretrained_tag: + model_with_tag = '.'.join([variant, pretrained_tag]) + pretrained_cfg = get_pretrained_cfg(model_with_tag) + + if not pretrained_cfg: + _logger.warning( + f"No pretrained configuration specified for {model_with_tag} model. Using a default." 
+ f" Please add a config to the model pretrained_cfg registry or pass explicitly.") + pretrained_cfg = PretrainedCfg() # instance with defaults + + pretrained_cfg_overlay = pretrained_cfg_overlay or {} + if not pretrained_cfg.architecture: + pretrained_cfg_overlay.setdefault('architecture', variant) + pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay) + + return pretrained_cfg + + +def build_model_with_cfg( + model_cls: Callable, + variant: str, + pretrained: bool, + pretrained_cfg: Optional[Dict] = None, + pretrained_cfg_overlay: Optional[Dict] = None, + model_cfg: Optional[Any] = None, + feature_cfg: Optional[Dict] = None, + pretrained_strict: bool = True, + pretrained_filter_fn: Optional[Callable] = None, + cache_dir: Optional[Union[str, Path]] = None, + kwargs_filter: Optional[Tuple[str]] = None, + **kwargs, +): + """ Build model with specified default_cfg and optional model_cfg + + This helper fn aids in the construction of a model including: + * handling default_cfg and associated pretrained weight loading + * passing through optional model_cfg for models with config based arch spec + * features_only model adaptation + * pruning config / model adaptation + + Args: + model_cls: Model class + variant: Model variant name + pretrained: Load the pretrained weights + pretrained_cfg: Model's pretrained weight/task config + pretrained_cfg_overlay: Entries that will override those in pretrained_cfg + model_cfg: Model's architecture config + feature_cfg: Feature extraction adapter config + pretrained_strict: Load pretrained weights strictly + pretrained_filter_fn: Filter callable for pretrained weights + cache_dir: Override model cache dir for Hugging Face Hub and Torch checkpoints + kwargs_filter: Kwargs keys to filter (remove) before passing to model + **kwargs: Model args passed through to model __init__ + """ + pruned = kwargs.pop('pruned', False) + features = False + feature_cfg = feature_cfg or {} + + # resolve and update model pretrained config and model kwargs + pretrained_cfg = resolve_pretrained_cfg( + variant, + pretrained_cfg=pretrained_cfg, + pretrained_cfg_overlay=pretrained_cfg_overlay + ) + pretrained_cfg = pretrained_cfg.to_dict() + + _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter) + + # Setup for feature extraction wrapper done at end of this fn + if kwargs.pop('features_only', False): + features = True + feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) + if 'out_indices' in kwargs: + feature_cfg['out_indices'] = kwargs.pop('out_indices') + if 'feature_cls' in kwargs: + feature_cfg['feature_cls'] = kwargs.pop('feature_cls') + + # Instantiate the model + if model_cfg is None: + model = model_cls(**kwargs) + else: + model = model_cls(cfg=model_cfg, **kwargs) + model.pretrained_cfg = pretrained_cfg + model.default_cfg = model.pretrained_cfg # alias for backwards compat + + if pruned: + model = adapt_model_from_file(model, variant) + + # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats + num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) + if pretrained: + load_pretrained( + model, + pretrained_cfg=pretrained_cfg, + num_classes=num_classes_pretrained, + in_chans=kwargs.get('in_chans', 3), + filter_fn=pretrained_filter_fn, + strict=pretrained_strict, + cache_dir=cache_dir, + ) + + # Wrap the model in a feature extraction module if enabled + if features: + use_getter = False + if 'feature_cls' in feature_cfg: + feature_cls 
= feature_cfg.pop('feature_cls') + if isinstance(feature_cls, str): + feature_cls = feature_cls.lower() + + # flatten_sequential only valid for some feature extractors + if feature_cls not in ('dict', 'list', 'hook'): + feature_cfg.pop('flatten_sequential', None) + + if 'hook' in feature_cls: + feature_cls = FeatureHookNet + elif feature_cls == 'list': + feature_cls = FeatureListNet + elif feature_cls == 'dict': + feature_cls = FeatureDictNet + elif feature_cls == 'fx': + feature_cls = FeatureGraphNet + elif feature_cls == 'getter': + use_getter = True + feature_cls = FeatureGetterNet + else: + assert False, f'Unknown feature class {feature_cls}' + else: + feature_cls = FeatureListNet + + output_fmt = getattr(model, 'output_fmt', None) + if output_fmt is not None and not use_getter: # don't set default for intermediate feat getter + feature_cfg.setdefault('output_fmt', output_fmt) + + model = feature_cls(model, **feature_cfg) + model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg + model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg) + + return model diff --git a/pytorch-image-models/timm/models/_efficientnet_blocks.py b/pytorch-image-models/timm/models/_efficientnet_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..c90d001a870cce57bb91e5f27e127f7bd9e95167 --- /dev/null +++ b/pytorch-image-models/timm/models/_efficientnet_blocks.py @@ -0,0 +1,702 @@ +""" EfficientNet, MobileNetV3, etc Blocks + +Hacked together by / Copyright 2019, Ross Wightman +""" +from typing import Callable, Dict, Optional, Type + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from timm.layers import create_conv2d, DropPath, make_divisible, create_act_layer, create_aa, to_2tuple, LayerType,\ + ConvNormAct, get_norm_act_layer, MultiQueryAttention2d, Attention2d + +__all__ = [ + 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual', + 'UniversalInvertedResidual', 'MobileAttention' +] + +ModuleType = Type[nn.Module] + + +def num_groups(group_size: Optional[int], channels: int): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +class SqueezeExcite(nn.Module): + """ Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family + + Args: + in_chs (int): input channels to layer + rd_ratio (float): ratio of squeeze reduction + act_layer (nn.Module): activation layer of containing block + gate_layer (Callable): attention gate function + force_act_layer (nn.Module): override block's activation fn if this is set/bound + rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs + """ + + def __init__( + self, + in_chs: int, + rd_ratio: float = 0.25, + rd_channels: Optional[int] = None, + act_layer: LayerType = nn.ReLU, + gate_layer: LayerType = nn.Sigmoid, + force_act_layer: Optional[LayerType] = None, + rd_round_fn: Optional[Callable] = None, + ): + super(SqueezeExcite, self).__init__() + if rd_channels is None: + rd_round_fn = rd_round_fn or round + rd_channels = rd_round_fn(in_chs * rd_ratio) + act_layer = force_act_layer or act_layer + self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) + self.act1 = create_act_layer(act_layer, inplace=True) + self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) + self.gate = 
create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se) + + +class ConvBnAct(nn.Module): + """ Conv + Norm Layer + Activation w/ optional skip connection + """ + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + group_size: int = 0, + pad_type: str = '', + skip: bool = False, + act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[LayerType] = None, + drop_path_rate: float = 0., + ): + super(ConvBnAct, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + groups = num_groups(group_size, in_chs) + self.has_skip = skip and stride == 1 and in_chs == out_chs + use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation + + self.conv = create_conv2d( + in_chs, out_chs, kernel_size, + stride=1 if use_aa else stride, + dilation=dilation, groups=groups, padding=pad_type) + self.bn1 = norm_act_layer(out_chs, inplace=True) + self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # output of conv after act, same as block coutput + return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels) + else: # location == 'bottleneck', block output + return dict(module='', num_chs=self.conv.out_channels) + + def forward(self, x): + shortcut = x + x = self.conv(x) + x = self.bn1(x) + x = self.aa(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x + + +class DepthwiseSeparableConv(nn.Module): + """ Depthwise-separable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion + (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. 
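+
+ A minimal usage sketch (shapes and argument values are illustrative only):
+
+     >>> import torch
+     >>> block = DepthwiseSeparableConv(32, 64, dw_kernel_size=3, stride=2)
+     >>> block(torch.randn(1, 32, 56, 56)).shape
+     torch.Size([1, 64, 28, 28])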
+ """ + def __init__( + self, + in_chs: int, + out_chs: int, + dw_kernel_size: int = 3, + stride: int = 1, + dilation: int = 1, + group_size: int = 1, + pad_type: str = '', + noskip: bool = False, + pw_kernel_size: int = 1, + pw_act: bool = False, + s2d: int = 0, + act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[ModuleType] = None, + drop_path_rate: float = 0., + ): + super(DepthwiseSeparableConv, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation + + # Space to depth + if s2d == 1: + sd_chs = int(in_chs * 4) + self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same') + self.bn_s2d = norm_act_layer(sd_chs, sd_chs) + dw_kernel_size = (dw_kernel_size + 1) // 2 + dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type + in_chs = sd_chs + use_aa = False # disable AA + else: + self.conv_s2d = None + self.bn_s2d = None + dw_pad_type = pad_type + + groups = num_groups(group_size, in_chs) + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, + stride=1 if use_aa else stride, + dilation=dilation, padding=dw_pad_type, groups=groups) + self.bn1 = norm_act_layer(in_chs, inplace=True) + self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa) + + # Squeeze-and-excitation + self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity() + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_act_layer(out_chs, inplace=True, apply_act=self.has_pw_act) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PW + return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck', block output + return dict(module='', num_chs=self.conv_pw.out_channels) + + def forward(self, x): + shortcut = x + if self.conv_s2d is not None: + x = self.conv_s2d(x) + x = self.bn_s2d(x) + x = self.conv_dw(x) + x = self.bn1(x) + x = self.aa(x) + x = self.se(x) + x = self.conv_pw(x) + x = self.bn2(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE + + Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often + referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in + * MNasNet - https://arxiv.org/abs/1807.11626 + * EfficientNet - https://arxiv.org/abs/1905.11946 + * MobileNet-V3 - https://arxiv.org/abs/1905.02244 + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + dw_kernel_size: int = 3, + stride: int = 1, + dilation: int = 1, + group_size: int = 1, + pad_type: str = '', + noskip: bool = False, + exp_ratio: float = 1.0, + exp_kernel_size: int = 1, + pw_kernel_size: int = 1, + s2d: int = 0, + act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[ModuleType] = None, + conv_kwargs: Optional[Dict] = None, + drop_path_rate: float = 0., + ): + super(InvertedResidual, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + conv_kwargs = conv_kwargs or {} + 
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip + use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation + + # Space to depth + if s2d == 1: + sd_chs = int(in_chs * 4) + self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same') + self.bn_s2d = norm_act_layer(sd_chs, sd_chs) + dw_kernel_size = (dw_kernel_size + 1) // 2 + dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type + in_chs = sd_chs + use_aa = False # disable AA + else: + self.conv_s2d = None + self.bn_s2d = None + dw_pad_type = pad_type + + mid_chs = make_divisible(in_chs * exp_ratio) + groups = num_groups(group_size, mid_chs) + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_act_layer(mid_chs, inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, + stride=1 if use_aa else stride, + dilation=dilation, groups=groups, padding=dw_pad_type, **conv_kwargs) + self.bn2 = norm_act_layer(mid_chs, inplace=True) + self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_act_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + return dict(module='', num_chs=self.conv_pwl.out_channels) + + def forward(self, x): + shortcut = x + if self.conv_s2d is not None: + x = self.conv_s2d(x) + x = self.bn_s2d(x) + x = self.conv_pw(x) + x = self.bn1(x) + x = self.conv_dw(x) + x = self.bn2(x) + x = self.aa(x) + x = self.se(x) + x = self.conv_pwl(x) + x = self.bn3(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x + + +class LayerScale2d(nn.Module): + def __init__(self, dim: int, init_values: float = 1e-5, inplace: bool = False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma.view(1, -1, 1, 1) + return x.mul_(gamma) if self.inplace else x * gamma + + +class UniversalInvertedResidual(nn.Module): + """ Universal Inverted Residual Block (aka Universal Inverted Bottleneck, UIB) + + For MobileNetV4 - https://arxiv.org/abs/, referenced from + https://github.com/tensorflow/models/blob/d93c7e932de27522b2fa3b115f58d06d6f640537/official/vision/modeling/layers/nn_blocks.py#L778 + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + dw_kernel_size_start: int = 0, + dw_kernel_size_mid: int = 3, + dw_kernel_size_end: int = 0, + stride: int = 1, + dilation: int = 1, + group_size: int = 1, + pad_type: str = '', + noskip: bool = False, + exp_ratio: float = 1.0, + act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[ModuleType] = None, + conv_kwargs: Optional[Dict] = None, + drop_path_rate: float = 0., + layer_scale_init_value: Optional[float] = 1e-5, + ): + super(UniversalInvertedResidual, self).__init__() + conv_kwargs = conv_kwargs or {} + self.has_skip = (in_chs == out_chs and 
stride == 1) and not noskip + if stride > 1: + assert dw_kernel_size_start or dw_kernel_size_mid or dw_kernel_size_end + + # FIXME dilation isn't right w/ extra ks > 1 convs + if dw_kernel_size_start: + dw_start_stride = stride if not dw_kernel_size_mid else 1 + dw_start_groups = num_groups(group_size, in_chs) + self.dw_start = ConvNormAct( + in_chs, in_chs, dw_kernel_size_start, + stride=dw_start_stride, + dilation=dilation, # FIXME + groups=dw_start_groups, + padding=pad_type, + apply_act=False, + act_layer=act_layer, + norm_layer=norm_layer, + aa_layer=aa_layer, + **conv_kwargs, + ) + else: + self.dw_start = nn.Identity() + + # Point-wise expansion + mid_chs = make_divisible(in_chs * exp_ratio) + self.pw_exp = ConvNormAct( + in_chs, mid_chs, 1, + padding=pad_type, + act_layer=act_layer, + norm_layer=norm_layer, + **conv_kwargs, + ) + + # Middle depth-wise convolution + if dw_kernel_size_mid: + groups = num_groups(group_size, mid_chs) + self.dw_mid = ConvNormAct( + mid_chs, mid_chs, dw_kernel_size_mid, + stride=stride, + dilation=dilation, # FIXME + groups=groups, + padding=pad_type, + act_layer=act_layer, + norm_layer=norm_layer, + aa_layer=aa_layer, + **conv_kwargs, + ) + else: + # keeping mid as identity so it can be hooked more easily for features + self.dw_mid = nn.Identity() + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.pw_proj = ConvNormAct( + mid_chs, out_chs, 1, + padding=pad_type, + apply_act=False, + act_layer=act_layer, + norm_layer=norm_layer, + **conv_kwargs, + ) + + if dw_kernel_size_end: + dw_end_stride = stride if not dw_kernel_size_start and not dw_kernel_size_mid else 1 + dw_end_groups = num_groups(group_size, out_chs) + if dw_end_stride > 1: + assert not aa_layer + self.dw_end = ConvNormAct( + out_chs, out_chs, dw_kernel_size_end, + stride=dw_end_stride, + dilation=dilation, + groups=dw_end_groups, + padding=pad_type, + apply_act=False, + act_layer=act_layer, + norm_layer=norm_layer, + **conv_kwargs, + ) + else: + self.dw_end = nn.Identity() + + if layer_scale_init_value is not None: + self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value) + else: + self.layer_scale = nn.Identity() + self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + return dict(module='pw_proj.conv', hook_type='forward_pre', num_chs=self.pw_proj.conv.in_channels) + else: # location == 'bottleneck', block output + return dict(module='', num_chs=self.pw_proj.conv.out_channels) + + def forward(self, x): + shortcut = x + x = self.dw_start(x) + x = self.pw_exp(x) + x = self.dw_mid(x) + x = self.se(x) + x = self.pw_proj(x) + x = self.dw_end(x) + x = self.layer_scale(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x + + +class MobileAttention(nn.Module): + """ Mobile Attention Block + + For MobileNetV4 - https://arxiv.org/abs/, referenced from + https://github.com/tensorflow/models/blob/d93c7e932de27522b2fa3b115f58d06d6f640537/official/vision/modeling/layers/nn_blocks.py#L1504 + """ + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 1, + dw_kernel_size: int = 3, + dilation: int = 1, + group_size: int = 1, + pad_type: str = '', + num_heads: int = 8, + key_dim: int = 64, + value_dim: int = 64, + use_multi_query: bool = False, + query_strides: int = (1, 1), + kv_stride: int = 1, + cpe_dw_kernel_size: int = 3, + noskip: bool = False, + 
act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[LayerType] = None, + drop_path_rate: float = 0., + attn_drop: float = 0.0, + proj_drop: float = 0.0, + layer_scale_init_value: Optional[float] = 1e-5, + use_bias: bool = False, + use_cpe: bool = False, + ): + super(MobileAttention, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip + self.query_strides = to_2tuple(query_strides) + self.kv_stride = kv_stride + self.has_query_stride = any([s > 1 for s in self.query_strides]) + + # This CPE is different than the one suggested in the original paper. + # https://arxiv.org/abs/2102.10882 + # 1. Rather than adding one CPE before the attention blocks, we add a CPE + # into every attention block. + # 2. We replace the expensive Conv2D by a Seperable DW Conv. + if use_cpe: + self.conv_cpe_dw = create_conv2d( + in_chs, in_chs, + kernel_size=cpe_dw_kernel_size, + dilation=dilation, + depthwise=True, + bias=True, + ) + else: + self.conv_cpe_dw = None + + self.norm = norm_act_layer(in_chs, apply_act=False) + + if num_heads is None: + assert in_chs % key_dim == 0 + num_heads = in_chs // key_dim + + if use_multi_query: + self.attn = MultiQueryAttention2d( + in_chs, + dim_out=out_chs, + num_heads=num_heads, + key_dim=key_dim, + value_dim=value_dim, + query_strides=query_strides, + kv_stride=kv_stride, + dilation=dilation, + padding=pad_type, + dw_kernel_size=dw_kernel_size, + attn_drop=attn_drop, + proj_drop=proj_drop, + #bias=use_bias, # why not here if used w/ mhsa? + ) + else: + self.attn = Attention2d( + in_chs, + dim_out=out_chs, + num_heads=num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + bias=use_bias, + ) + + if layer_scale_init_value is not None: + self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value) + else: + self.layer_scale = nn.Identity() + + self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PW + return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck', block output + return dict(module='', num_chs=self.conv_pw.out_channels) + + def forward(self, x): + if self.conv_cpe_dw is not None: + x_cpe = self.conv_cpe_dw(x) + x = x + x_cpe + + shortcut = x + x = self.norm(x) + x = self.attn(x) + x = self.layer_scale(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + + return x + + +class CondConvResidual(InvertedResidual): + """ Inverted residual block w/ CondConv routing""" + + def __init__( + self, + in_chs: int, + out_chs: int, + dw_kernel_size: int = 3, + stride: int = 1, + dilation: int = 1, + group_size: int = 1, + pad_type: str = '', + noskip: bool = False, + exp_ratio: float = 1.0, + exp_kernel_size: int = 1, + pw_kernel_size: int = 1, + act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[ModuleType] = None, + num_experts: int = 0, + drop_path_rate: float = 0., + ): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + super(CondConvResidual, self).__init__( + in_chs, + out_chs, + dw_kernel_size=dw_kernel_size, + stride=stride, + dilation=dilation, + group_size=group_size, + pad_type=pad_type, + noskip=noskip, + exp_ratio=exp_ratio, + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + act_layer=act_layer, + 
norm_layer=norm_layer, + aa_layer=aa_layer, + se_layer=se_layer, + conv_kwargs=conv_kwargs, + drop_path_rate=drop_path_rate, + ) + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + shortcut = x + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) # CondConv routing + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.se(x) + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x + + +class EdgeResidual(nn.Module): + """ Residual block with expansion convolution followed by pointwise-linear w/ stride + + Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML` + - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + + This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers + * MobileDet - https://arxiv.org/abs/2004.14525 + * EfficientNet-X - https://arxiv.org/abs/2102.05610 + * EfficientNet-V2 - https://arxiv.org/abs/2104.00298 + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + exp_kernel_size: int = 3, + stride: int = 1, + dilation: int = 1, + group_size: int = 0, + pad_type: str = '', + force_in_chs: int = 0, + noskip: bool = False, + exp_ratio: float = 1.0, + pw_kernel_size: int = 1, + act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[ModuleType] = None, + drop_path_rate: float = 0., + ): + super(EdgeResidual, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + if force_in_chs > 0: + mid_chs = make_divisible(force_in_chs * exp_ratio) + else: + mid_chs = make_divisible(in_chs * exp_ratio) + groups = num_groups(group_size, mid_chs) # NOTE: Using out_chs of conv_exp for groups calc + self.has_skip = (in_chs == out_chs and stride == 1) and not noskip + use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation + + # Expansion convolution + self.conv_exp = create_conv2d( + in_chs, mid_chs, exp_kernel_size, + stride=1 if use_aa else stride, + dilation=dilation, groups=groups, padding=pad_type) + self.bn1 = norm_act_layer(mid_chs, inplace=True) + + self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_act_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, before PWL + return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + return dict(module='', num_chs=self.conv_pwl.out_channels) + + def forward(self, x): + shortcut = x + x = self.conv_exp(x) + x = self.bn1(x) + x = self.aa(x) + x = self.se(x) + x = self.conv_pwl(x) + x = self.bn2(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x diff --git a/pytorch-image-models/timm/models/_factory.py b/pytorch-image-models/timm/models/_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..b347bc4deff2a3131cba34b607bace1e1d6d456d --- /dev/null 
+++ b/pytorch-image-models/timm/models/_factory.py @@ -0,0 +1,137 @@ +import os +from pathlib import Path +from typing import Any, Dict, Optional, Union +from urllib.parse import urlsplit + +from timm.layers import set_layer_config +from ._helpers import load_checkpoint +from ._hub import load_model_config_from_hf +from ._pretrained import PretrainedCfg +from ._registry import is_model, model_entrypoint, split_model_name_tag + + +__all__ = ['parse_model_name', 'safe_model_name', 'create_model'] + + +def parse_model_name(model_name: str): + if model_name.startswith('hf_hub'): + # NOTE for backwards compat, deprecate hf_hub use + model_name = model_name.replace('hf_hub', 'hf-hub') + parsed = urlsplit(model_name) + assert parsed.scheme in ('', 'timm', 'hf-hub') + if parsed.scheme == 'hf-hub': + # FIXME may use fragment as revision, currently `@` in URI path + return parsed.scheme, parsed.path + else: + model_name = os.path.split(parsed.path)[-1] + return 'timm', model_name + + +def safe_model_name(model_name: str, remove_source: bool = True): + # return a filename / path safe model name + def make_safe(name): + return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') + if remove_source: + model_name = parse_model_name(model_name)[-1] + return make_safe(model_name) + + +def create_model( + model_name: str, + pretrained: bool = False, + pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]] = None, + pretrained_cfg_overlay: Optional[Dict[str, Any]] = None, + checkpoint_path: Optional[Union[str, Path]] = None, + cache_dir: Optional[Union[str, Path]] = None, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + **kwargs, +): + """Create a model. + + Lookup model's entrypoint function and pass relevant args to create a new model. + + Tip: + **kwargs will be passed through entrypoint fn to ``timm.models.build_model_with_cfg()`` + and then the model class __init__(). kwargs values set to None are pruned before passing. + + Args: + model_name: Name of model to instantiate. + pretrained: If set to `True`, load pretrained ImageNet-1k weights. + pretrained_cfg: Pass in an external pretrained_cfg for model. + pretrained_cfg_overlay: Replace key-values in base pretrained_cfg with these. + checkpoint_path: Path of checkpoint to load _after_ the model is initialized. + cache_dir: Override model cache dir for Hugging Face Hub and Torch checkpoints. + scriptable: Set layer config so that model is jit scriptable (not working for all models yet). + exportable: Set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet). + no_jit: Set layer config so that model doesn't utilize jit scripted layers (so far activations only). + + Keyword Args: + drop_rate (float): Classifier dropout rate for training. + drop_path_rate (float): Stochastic depth drop rate for training. + global_pool (str): Classifier global pooling type. + + Example: + + ```py + >>> from timm import create_model + + >>> # Create a MobileNetV3-Large model with no pretrained weights. + >>> model = create_model('mobilenetv3_large_100') + + >>> # Create a MobileNetV3-Large model with pretrained weights. + >>> model = create_model('mobilenetv3_large_100', pretrained=True) + >>> model.num_classes + 1000 + + >>> # Create a MobileNetV3-Large model with pretrained weights and a new head with 10 classes. 
+ >>> model = create_model('mobilenetv3_large_100', pretrained=True, num_classes=10) + >>> model.num_classes + 10 + + >>> # Create a Dinov2 small model with pretrained weights and save weights in a custom directory. + >>> model = create_model('vit_small_patch14_dinov2.lvd142m', pretrained=True, cache_dir="/data/my-models") + >>> # Data will be stored at `/data/my-models/models--timm--vit_small_patch14_dinov2.lvd142m/` + ``` + """ + # Parameters that aren't supported by all models or are intended to only override model defaults if set + # should default to None in command line args/cfg. Remove them if they are present and not set so that + # non-supporting models don't break and default args remain in effect. + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + model_source, model_name = parse_model_name(model_name) + if model_source == 'hf-hub': + assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.' + # For model names specified in the form `hf-hub:path/architecture_name@revision`, + # load model weights + pretrained_cfg from Hugging Face hub. + pretrained_cfg, model_name, model_args = load_model_config_from_hf( + model_name, + cache_dir=cache_dir, + ) + if model_args: + for k, v in model_args.items(): + kwargs.setdefault(k, v) + else: + model_name, pretrained_tag = split_model_name_tag(model_name) + if pretrained_tag and not pretrained_cfg: + # a valid pretrained_cfg argument takes priority over tag in model name + pretrained_cfg = pretrained_tag + + if not is_model(model_name): + raise RuntimeError('Unknown model (%s)' % model_name) + + create_fn = model_entrypoint(model_name) + with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): + model = create_fn( + pretrained=pretrained, + pretrained_cfg=pretrained_cfg, + pretrained_cfg_overlay=pretrained_cfg_overlay, + cache_dir=cache_dir, + **kwargs, + ) + + if checkpoint_path: + load_checkpoint(model, checkpoint_path) + + return model diff --git a/pytorch-image-models/timm/models/_features.py b/pytorch-image-models/timm/models/_features.py new file mode 100644 index 0000000000000000000000000000000000000000..14d174f5d4ee4fef71bd1c29d00fe40e91794b04 --- /dev/null +++ b/pytorch-image-models/timm/models/_features.py @@ -0,0 +1,484 @@ +""" PyTorch Feature Extraction Helpers + +A collection of classes, functions, modules to help extract features from models +and provide a common interface for describing them. + +The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter +https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict, defaultdict +from copy import deepcopy +from functools import partial +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from timm.layers import Format, _assert + + +__all__ = [ + 'FeatureInfo', 'FeatureHooks', 'FeatureDictNet', 'FeatureListNet', 'FeatureHookNet', 'FeatureGetterNet', + 'feature_take_indices' +] + + +def feature_take_indices( + num_features: int, + indices: Optional[Union[int, List[int]]] = None, + as_set: bool = False, +) -> Tuple[List[int], int]: + """ Determine the absolute feature indices to 'take' from. 
+ + Note: This function can be called in forwar() so must be torchscript compatible, + which requires some incomplete typing and workaround hacks. + + Args: + num_features: total number of features to select from + indices: indices to select, + None -> select all + int -> select last n + list/tuple of int -> return specified (-ve indices specify from end) + as_set: return as a set + + Returns: + List (or set) of absolute (from beginning) indices, Maximum index + """ + if indices is None: + indices = num_features # all features if None + + if isinstance(indices, int): + # convert int -> last n indices + _assert(0 < indices <= num_features, f'last-n ({indices}) is out of range (1 to {num_features})') + take_indices = [num_features - indices + i for i in range(indices)] + else: + take_indices: List[int] = [] + for i in indices: + idx = num_features + i if i < 0 else i + _assert(0 <= idx < num_features, f'feature index {idx} is out of range (0 to {num_features - 1})') + take_indices.append(idx) + + if not torch.jit.is_scripting() and as_set: + return set(take_indices), max(take_indices) + + return take_indices, max(take_indices) + + +def _out_indices_as_tuple(x: Union[int, Tuple[int, ...]]) -> Tuple[int, ...]: + if isinstance(x, int): + # if indices is an int, take last N features + return tuple(range(-x, 0)) + return tuple(x) + + +OutIndicesT = Union[int, Tuple[int, ...]] + + +class FeatureInfo: + + def __init__( + self, + feature_info: List[Dict], + out_indices: OutIndicesT, + ): + out_indices = _out_indices_as_tuple(out_indices) + prev_reduction = 1 + for i, fi in enumerate(feature_info): + # sanity check the mandatory fields, there may be additional fields depending on the model + assert 'num_chs' in fi and fi['num_chs'] > 0 + assert 'reduction' in fi and fi['reduction'] >= prev_reduction + prev_reduction = fi['reduction'] + assert 'module' in fi + fi.setdefault('index', i) + self.out_indices = out_indices + self.info = feature_info + + def from_other(self, out_indices: OutIndicesT): + out_indices = _out_indices_as_tuple(out_indices) + return FeatureInfo(deepcopy(self.info), out_indices) + + def get(self, key: str, idx: Optional[Union[int, List[int]]] = None): + """ Get value by key at specified index (indices) + if idx == None, returns value for key at each output index + if idx is an integer, return value for that feature module index (ignoring output indices) + if idx is a list/tuple, return value for each module index (ignoring output indices) + """ + if idx is None: + return [self.info[i][key] for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i][key] for i in idx] + else: + return self.info[idx][key] + + def get_dicts(self, keys: Optional[List[str]] = None, idx: Optional[Union[int, List[int]]] = None): + """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None) + """ + if idx is None: + if keys is None: + return [self.info[i] for i in self.out_indices] + else: + return [{k: self.info[i][k] for k in keys} for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx] + else: + return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys} + + def channels(self, idx: Optional[Union[int, List[int]]] = None): + """ feature channels accessor + """ + return self.get('num_chs', idx) + + def reduction(self, idx: Optional[Union[int, List[int]]] = None): + """ feature reduction (output stride) accessor + 
""" + return self.get('reduction', idx) + + def module_name(self, idx: Optional[Union[int, List[int]]] = None): + """ feature module name accessor + """ + return self.get('module', idx) + + def __getitem__(self, item): + return self.info[item] + + def __len__(self): + return len(self.info) + + +class FeatureHooks: + """ Feature Hook Helper + + This module helps with the setup and extraction of hooks for extracting features from + internal nodes in a model by node name. + + FIXME This works well in eager Python but needs redesign for torchscript. + """ + + def __init__( + self, + hooks: Sequence[Union[str, Dict]], + named_modules: dict, + out_map: Sequence[Union[int, str]] = None, + default_hook_type: str = 'forward', + ): + # setup feature hooks + self._feature_outputs = defaultdict(OrderedDict) + self._handles = [] + modules = {k: v for k, v in named_modules} + for i, h in enumerate(hooks): + hook_name = h if isinstance(h, str) else h['module'] + m = modules[hook_name] + hook_id = out_map[i] if out_map else hook_name + hook_fn = partial(self._collect_output_hook, hook_id) + hook_type = default_hook_type + if isinstance(h, dict): + hook_type = h.get('hook_type', default_hook_type) + if hook_type == 'forward_pre': + handle = m.register_forward_pre_hook(hook_fn) + elif hook_type == 'forward': + handle = m.register_forward_hook(hook_fn) + else: + assert False, "Unsupported hook type" + self._handles.append(handle) + + def _collect_output_hook(self, hook_id, *args): + x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre + if isinstance(x, tuple): + x = x[0] # unwrap input tuple + self._feature_outputs[x.device][hook_id] = x + + def get_output(self, device) -> Dict[str, torch.tensor]: + output = self._feature_outputs[device] + self._feature_outputs[device] = OrderedDict() # clear after reading + return output + + +def _module_list(module, flatten_sequential=False): + # a yield/iter would be better for this but wouldn't be compatible with torchscript + ml = [] + for name, module in module.named_children(): + if flatten_sequential and isinstance(module, nn.Sequential): + # first level of Sequential containers is flattened into containing model + for child_name, child_module in module.named_children(): + combined = [name, child_name] + ml.append(('_'.join(combined), '.'.join(combined), child_module)) + else: + ml.append((name, name, module)) + return ml + + +def _get_feature_info(net, out_indices: OutIndicesT): + feature_info = getattr(net, 'feature_info') + if isinstance(feature_info, FeatureInfo): + return feature_info.from_other(out_indices) + elif isinstance(feature_info, (list, tuple)): + return FeatureInfo(net.feature_info, out_indices) + else: + assert False, "Provided feature_info is not valid" + + +def _get_return_layers(feature_info, out_map): + module_names = feature_info.module_name() + return_layers = {} + for i, name in enumerate(module_names): + return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] + return return_layers + + +class FeatureDictNet(nn.ModuleDict): + """ Feature extractor with OrderedDict return + + Wrap a model and extract features as specified by the out indices, the network is + partially re-built from contained modules. + + There is a strong assumption that the modules have been registered into the model in the same + order as they are used. There should be no reuse of the same nn.Module more than once, including + trivial modules like `self.relu = nn.ReLU`. 
+ + Only submodules that are directly assigned to the model class (`model.feature1`) or at most + one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured. + All Sequential containers that are directly assigned to the original model will have their + modules assigned to this module with the name `model.features.1` being changed to `model.features_1` + """ + def __init__( + self, + model: nn.Module, + out_indices: OutIndicesT = (0, 1, 2, 3, 4), + out_map: Sequence[Union[int, str]] = None, + output_fmt: str = 'NCHW', + feature_concat: bool = False, + flatten_sequential: bool = False, + ): + """ + Args: + model: Model from which to extract features. + out_indices: Output indices of the model features to extract. + out_map: Return id mapping for each output index, otherwise str(index) is used. + feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting + first element e.g. `x[0]` + flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules) + """ + super(FeatureDictNet, self).__init__() + self.feature_info = _get_feature_info(model, out_indices) + self.output_fmt = Format(output_fmt) + self.concat = feature_concat + self.grad_checkpointing = False + self.return_layers = {} + + return_layers = _get_return_layers(self.feature_info, out_map) + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = set(return_layers.keys()) + layers = OrderedDict() + for new_name, old_name, module in modules: + layers[new_name] = module + if old_name in remaining: + # return id has to be consistently str type for torchscript + self.return_layers[new_name] = str(return_layers[old_name]) + remaining.remove(old_name) + if not remaining: + break + assert not remaining and len(self.return_layers) == len(return_layers), \ + f'Return layers ({remaining}) are not present in model' + self.update(layers) + + def set_grad_checkpointing(self, enable: bool = True): + self.grad_checkpointing = enable + + def _collect(self, x) -> (Dict[str, torch.Tensor]): + out = OrderedDict() + for i, (name, module) in enumerate(self.items()): + if self.grad_checkpointing and not torch.jit.is_scripting(): + # Skipping checkpoint of first module because need a gradient at input + # Skipping last because networks with in-place ops might fail w/ checkpointing enabled + # NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues + first_or_last_module = i == 0 or i == max(len(self) - 1, 0) + x = module(x) if first_or_last_module else checkpoint(module, x) + else: + x = module(x) + + if name in self.return_layers: + out_id = self.return_layers[name] + if isinstance(x, (tuple, list)): + # If model tap is a tuple or list, concat or select first element + # FIXME this may need to be more generic / flexible for some nets + out[out_id] = torch.cat(x, 1) if self.concat else x[0] + else: + out[out_id] = x + return out + + def forward(self, x) -> Dict[str, torch.Tensor]: + return self._collect(x) + + +class FeatureListNet(FeatureDictNet): + """ Feature extractor with list return + + A specialization of FeatureDictNet that always returns features as a list (values() of dict). + """ + def __init__( + self, + model: nn.Module, + out_indices: OutIndicesT = (0, 1, 2, 3, 4), + output_fmt: str = 'NCHW', + feature_concat: bool = False, + flatten_sequential: bool = False, + ): + """ + Args: + model: Model from which to extract features. 
+ out_indices: Output indices of the model features to extract. + feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting + first element e.g. `x[0]` + flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules) + """ + super().__init__( + model, + out_indices=out_indices, + output_fmt=output_fmt, + feature_concat=feature_concat, + flatten_sequential=flatten_sequential, + ) + + def forward(self, x) -> (List[torch.Tensor]): + return list(self._collect(x).values()) + + +class FeatureHookNet(nn.ModuleDict): + """ FeatureHookNet + + Wrap a model and extract features specified by the out indices using forward/forward-pre hooks. + + If `no_rewrite` is True, features are extracted via hooks without modifying the underlying + network in any way. + + If `no_rewrite` is False, the model will be re-written as in the + FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one. + + FIXME this does not currently work with Torchscript, see FeatureHooks class + """ + def __init__( + self, + model: nn.Module, + out_indices: OutIndicesT = (0, 1, 2, 3, 4), + out_map: Optional[Sequence[Union[int, str]]] = None, + return_dict: bool = False, + output_fmt: str = 'NCHW', + no_rewrite: Optional[bool] = None, + flatten_sequential: bool = False, + default_hook_type: str = 'forward', + ): + """ + + Args: + model: Model from which to extract features. + out_indices: Output indices of the model features to extract. + out_map: Return id mapping for each output index, otherwise str(index) is used. + return_dict: Output features as a dict. + no_rewrite: Enforce that model is not re-written if True, ie no modules are removed / changed. + flatten_sequential arg must also be False if this is set True. + flatten_sequential: Re-write modules by flattening first two levels of nn.Sequential containers. + default_hook_type: The default hook type to use if not specified in model.feature_info. + """ + super().__init__() + assert not torch.jit.is_scripting() + self.feature_info = _get_feature_info(model, out_indices) + self.return_dict = return_dict + self.output_fmt = Format(output_fmt) + self.grad_checkpointing = False + if no_rewrite is None: + no_rewrite = not flatten_sequential + layers = OrderedDict() + hooks = [] + if no_rewrite: + assert not flatten_sequential + if hasattr(model, 'reset_classifier'): # make sure classifier is removed? 
+ model.reset_classifier(0) + layers['body'] = model + hooks.extend(self.feature_info.get_dicts()) + else: + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = { + f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type + for f in self.feature_info.get_dicts() + } + for new_name, old_name, module in modules: + layers[new_name] = module + for fn, fm in module.named_modules(prefix=old_name): + if fn in remaining: + hooks.append(dict(module=fn, hook_type=remaining[fn])) + del remaining[fn] + if not remaining: + break + assert not remaining, f'Return layers ({remaining}) are not present in model' + self.update(layers) + self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map) + + def set_grad_checkpointing(self, enable: bool = True): + self.grad_checkpointing = enable + + def forward(self, x): + for i, (name, module) in enumerate(self.items()): + if self.grad_checkpointing and not torch.jit.is_scripting(): + # Skipping checkpoint of first module because need a gradient at input + # Skipping last because networks with in-place ops might fail w/ checkpointing enabled + # NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues + first_or_last_module = i == 0 or i == max(len(self) - 1, 0) + x = module(x) if first_or_last_module else checkpoint(module, x) + else: + x = module(x) + out = self.hooks.get_output(x.device) + return out if self.return_dict else list(out.values()) + + +class FeatureGetterNet(nn.ModuleDict): + """ FeatureGetterNet + + Wrap models with a feature getter method, like 'get_intermediate_layers' + + """ + def __init__( + self, + model: nn.Module, + out_indices: OutIndicesT = 4, + out_map: Optional[Sequence[Union[int, str]]] = None, + return_dict: bool = False, + output_fmt: str = 'NCHW', + norm: bool = False, + prune: bool = True, + ): + """ + + Args: + model: Model to wrap. + out_indices: Indices of features to extract. + out_map: Remap feature names for dict output (WIP, not supported). + return_dict: Return features as dictionary instead of list (WIP, not supported). + norm: Apply final model norm to all output features (if possible). 
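+
+ A rough usage sketch (assumes the wrapped model implements ``forward_intermediates``,
+ e.g. a timm ViT; model name and shapes are illustrative):
+
+     >>> import torch, timm
+     >>> vit = timm.create_model('vit_small_patch16_224')
+     >>> getter = FeatureGetterNet(vit, out_indices=3)
+     >>> feats = getter(torch.randn(1, 3, 224, 224))
+     >>> len(feats)
+     3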
+ """ + super().__init__() + if prune and hasattr(model, 'prune_intermediate_layers'): + # replace out_indices after they've been normalized, -ve indices will be invalid after prune + out_indices = model.prune_intermediate_layers( + out_indices, + prune_norm=not norm, + ) + self.feature_info = _get_feature_info(model, out_indices) + self.model = model + self.out_indices = out_indices + self.out_map = out_map + self.return_dict = return_dict + self.output_fmt = Format(output_fmt) + self.norm = norm + + def forward(self, x): + features = self.model.forward_intermediates( + x, + indices=self.out_indices, + norm=self.norm, + output_fmt=self.output_fmt, + intermediates_only=True, + ) + return features diff --git a/pytorch-image-models/timm/models/_features_fx.py b/pytorch-image-models/timm/models/_features_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..6679b38b463e2316256f43a028f13562dddbacf3 --- /dev/null +++ b/pytorch-image-models/timm/models/_features_fx.py @@ -0,0 +1,179 @@ +""" PyTorch FX Based Feature Extraction Helpers +Using https://pytorch.org/vision/stable/feature_extraction.html +""" +from typing import Callable, Dict, List, Optional, Union, Tuple, Type + +import torch +from torch import nn + +from ._features import _get_feature_info, _get_return_layers + +try: + # NOTE we wrap torchvision fns to use timm leaf / no trace definitions + from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor + from torchvision.models.feature_extraction import get_graph_node_names as _get_graph_node_names + has_fx_feature_extraction = True +except ImportError: + has_fx_feature_extraction = False + +# Layers we went to treat as leaf modules +from timm.layers import Conv2dSame, ScaledStdConv2dSame, CondConv2d, StdConv2dSame, Format +from timm.layers import resample_abs_pos_embed, resample_abs_pos_embed_nhwc +from timm.layers.non_local_attn import BilinearAttnTransform +from timm.layers.pool2d_same import MaxPool2dSame, AvgPool2dSame +from timm.layers.norm_act import ( + BatchNormAct2d, + SyncBatchNormAct, + FrozenBatchNormAct2d, + GroupNormAct, + GroupNorm1Act, + LayerNormAct, + LayerNormAct2d +) + +__all__ = ['register_notrace_module', 'is_notrace_module', 'get_notrace_modules', + 'register_notrace_function', 'is_notrace_function', 'get_notrace_functions', + 'create_feature_extractor', 'get_graph_node_names', 'FeatureGraphNet', 'GraphExtractNet'] + + +# NOTE: By default, any modules from timm.models.layers that we want to treat as leaf modules go here +# BUT modules from timm.models should use the registration mechanism below +_leaf_modules = { + BilinearAttnTransform, # reason: flow control t <= 1 + # Reason: get_same_padding has a max which raises a control flow error + Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame, + CondConv2d, # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0]), + BatchNormAct2d, + SyncBatchNormAct, + FrozenBatchNormAct2d, + GroupNormAct, + GroupNorm1Act, + LayerNormAct, + LayerNormAct2d, +} + +try: + from timm.layers import InplaceAbn + _leaf_modules.add(InplaceAbn) +except ImportError: + pass + + +def register_notrace_module(module: Type[nn.Module]): + """ + Any module not under timm.models.layers should get this decorator if we don't want to trace through it. 
+ """ + _leaf_modules.add(module) + return module + + +def is_notrace_module(module: Type[nn.Module]): + return module in _leaf_modules + + +def get_notrace_modules(): + return list(_leaf_modules) + + +# Functions we want to autowrap (treat them as leaves) +_autowrap_functions = { + resample_abs_pos_embed, + resample_abs_pos_embed_nhwc, +} + + +def register_notrace_function(func: Callable): + """ + Decorator for functions which ought not to be traced through + """ + _autowrap_functions.add(func) + return func + + +def is_notrace_function(func: Callable): + return func in _autowrap_functions + + +def get_notrace_functions(): + return list(_autowrap_functions) + + +def get_graph_node_names(model: nn.Module) -> Tuple[List[str], List[str]]: + return _get_graph_node_names( + model, + tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)} + ) + + +def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]): + assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' + return _create_feature_extractor( + model, return_nodes, + tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)} + ) + + +class FeatureGraphNet(nn.Module): + """ A FX Graph based feature extractor that works with the model feature_info metadata + """ + return_dict: torch.jit.Final[bool] + + def __init__( + self, + model: nn.Module, + out_indices: Tuple[int, ...], + out_map: Optional[Dict] = None, + output_fmt: str = 'NCHW', + return_dict: bool = False, + ): + super().__init__() + assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' + self.feature_info = _get_feature_info(model, out_indices) + if out_map is not None: + assert len(out_map) == len(out_indices) + self.output_fmt = Format(output_fmt) + return_nodes = _get_return_layers(self.feature_info, out_map) + self.graph_module = create_feature_extractor(model, return_nodes) + self.return_dict = return_dict + + def forward(self, x): + out = self.graph_module(x) + if self.return_dict: + return out + return list(out.values()) + + +class GraphExtractNet(nn.Module): + """ A standalone feature extraction wrapper that maps dict -> list or single tensor + NOTE: + * one can use feature_extractor directly if dictionary output is desired + * unlike FeatureGraphNet, this is intended to be used standalone and not with model feature_info + metadata for builtin feature extraction mode + * create_feature_extractor can be used directly if dictionary output is acceptable + + Args: + model: model to extract features from + return_nodes: node names to return features from (dict or list) + squeeze_out: if only one output, and output in list format, flatten to single tensor + return_dict: return as dictionary from extractor with node names as keys, ignores squeeze_out arg + """ + return_dict: torch.jit.Final[bool] + + def __init__( + self, + model: nn.Module, + return_nodes: Union[Dict[str, str], List[str]], + squeeze_out: bool = True, + return_dict: bool = False, + ): + super().__init__() + self.squeeze_out = squeeze_out + self.graph_module = create_feature_extractor(model, return_nodes) + self.return_dict = return_dict + + def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]: + out = self.graph_module(x) + if self.return_dict: + return out + out = list(out.values()) + return out[0] if self.squeeze_out and len(out) == 1 else out diff --git 
a/pytorch-image-models/timm/models/_hub.py b/pytorch-image-models/timm/models/_hub.py new file mode 100644 index 0000000000000000000000000000000000000000..4922dfc09a3368153d6372950be99acda7b806de --- /dev/null +++ b/pytorch-image-models/timm/models/_hub.py @@ -0,0 +1,465 @@ +import hashlib +import json +import logging +import os +from functools import partial +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Iterable, List, Optional, Tuple, Union + +import torch +from torch.hub import HASH_REGEX, download_url_to_file, urlparse + +try: + from torch.hub import get_dir +except ImportError: + from torch.hub import _get_torch_home as get_dir + +try: + import safetensors.torch + _has_safetensors = True +except ImportError: + _has_safetensors = False + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + +from timm import __version__ +from timm.models._pretrained import filter_pretrained_cfg + +try: + from huggingface_hub import ( + create_repo, get_hf_file_metadata, + hf_hub_download, hf_hub_url, + repo_type_and_id_from_hf_id, upload_folder) + from huggingface_hub.utils import EntryNotFoundError + hf_hub_download = partial(hf_hub_download, library_name="timm", library_version=__version__) + _has_hf_hub = True +except ImportError: + hf_hub_download = None + _has_hf_hub = False + +_logger = logging.getLogger(__name__) + +__all__ = ['get_cache_dir', 'download_cached_file', 'has_hf_hub', 'hf_split', 'load_model_config_from_hf', + 'load_state_dict_from_hf', 'save_for_hf', 'push_to_hf_hub'] + +# Default name for a weights file hosted on the Huggingface Hub. +HF_WEIGHTS_NAME = "pytorch_model.bin" # default pytorch pkl +HF_SAFE_WEIGHTS_NAME = "model.safetensors" # safetensors version +HF_OPEN_CLIP_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl +HF_OPEN_CLIP_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version + + +def get_cache_dir(child_dir: str = ''): + """ + Returns the location of the directory where models are cached (and creates it if necessary). 
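+
+    Editor's note: with default settings this resolves to '<torch hub dir>/checkpoints'
+    (typically '~/.cache/torch/hub/checkpoints'); passing a child_dir appends a subdirectory,
+    e.g. get_cache_dir('my_models') -> '<torch hub dir>/checkpoints/my_models'.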
+ """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + hub_dir = get_dir() + child_dir = () if not child_dir else (child_dir,) + model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) + os.makedirs(model_dir, exist_ok=True) + return model_dir + + +def download_cached_file( + url: Union[str, List[str], Tuple[str, str]], + check_hash: bool = True, + progress: bool = False, + cache_dir: Optional[Union[str, Path]] = None, +): + if isinstance(url, (list, tuple)): + url, filename = url + else: + parts = urlparse(url) + filename = os.path.basename(parts.path) + if cache_dir: + os.makedirs(cache_dir, exist_ok=True) + else: + cache_dir = get_cache_dir() + cached_file = os.path.join(cache_dir, filename) + if not os.path.exists(cached_file): + _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file(url, cached_file, hash_prefix, progress=progress) + return cached_file + + +def check_cached_file( + url: Union[str, List[str], Tuple[str, str]], + check_hash: bool = True, + cache_dir: Optional[Union[str, Path]] = None, +): + if isinstance(url, (list, tuple)): + url, filename = url + else: + parts = urlparse(url) + filename = os.path.basename(parts.path) + if not cache_dir: + cache_dir = get_cache_dir() + cached_file = os.path.join(cache_dir, filename) + if os.path.exists(cached_file): + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + if hash_prefix: + with open(cached_file, 'rb') as f: + hd = hashlib.sha256(f.read()).hexdigest() + if hd[:len(hash_prefix)] != hash_prefix: + return False + return True + return False + + +def has_hf_hub(necessary: bool = False): + if not _has_hf_hub and necessary: + # if no HF Hub module installed, and it is necessary to continue, raise error + raise RuntimeError( + 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') + return _has_hf_hub + + +def hf_split(hf_id: str): + # FIXME I may change @ -> # and be parsed as fragment in a URI model name scheme + rev_split = hf_id.split('@') + assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.' 
+ hf_model_id = rev_split[0] + hf_revision = rev_split[-1] if len(rev_split) > 1 else None + return hf_model_id, hf_revision + + +def load_cfg_from_json(json_file: Union[str, Path]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + +def download_from_hf( + model_id: str, + filename: str, + cache_dir: Optional[Union[str, Path]] = None, +): + hf_model_id, hf_revision = hf_split(model_id) + return hf_hub_download( + hf_model_id, + filename, + revision=hf_revision, + cache_dir=cache_dir, + ) + + +def load_model_config_from_hf( + model_id: str, + cache_dir: Optional[Union[str, Path]] = None, +): + assert has_hf_hub(True) + cached_file = download_from_hf(model_id, 'config.json', cache_dir=cache_dir) + + hf_config = load_cfg_from_json(cached_file) + if 'pretrained_cfg' not in hf_config: + # old form, pull pretrain_cfg out of the base dict + pretrained_cfg = hf_config + hf_config = {} + hf_config['architecture'] = pretrained_cfg.pop('architecture') + hf_config['num_features'] = pretrained_cfg.pop('num_features', None) + if 'labels' in pretrained_cfg: # deprecated name for 'label_names' + pretrained_cfg['label_names'] = pretrained_cfg.pop('labels') + hf_config['pretrained_cfg'] = pretrained_cfg + + # NOTE currently discarding parent config as only arch name and pretrained_cfg used in timm right now + pretrained_cfg = hf_config['pretrained_cfg'] + pretrained_cfg['hf_hub_id'] = model_id # insert hf_hub id for pretrained weight load during model creation + pretrained_cfg['source'] = 'hf-hub' + + # model should be created with base config num_classes if its exist + if 'num_classes' in hf_config: + pretrained_cfg['num_classes'] = hf_config['num_classes'] + + # label meta-data in base config overrides saved pretrained_cfg on load + if 'label_names' in hf_config: + pretrained_cfg['label_names'] = hf_config.pop('label_names') + if 'label_descriptions' in hf_config: + pretrained_cfg['label_descriptions'] = hf_config.pop('label_descriptions') + + model_args = hf_config.get('model_args', {}) + model_name = hf_config['architecture'] + return pretrained_cfg, model_name, model_args + + +def load_state_dict_from_hf( + model_id: str, + filename: str = HF_WEIGHTS_NAME, + weights_only: bool = False, + cache_dir: Optional[Union[str, Path]] = None, +): + assert has_hf_hub(True) + hf_model_id, hf_revision = hf_split(model_id) + + # Look for .safetensors alternatives and load from it if it exists + if _has_safetensors: + for safe_filename in _get_safe_alternatives(filename): + try: + cached_safe_file = hf_hub_download( + repo_id=hf_model_id, + filename=safe_filename, + revision=hf_revision, + cache_dir=cache_dir, + ) + _logger.info( + f"[{model_id}] Safe alternative available for '{filename}' " + f"(as '{safe_filename}'). Loading weights using safetensors.") + return safetensors.torch.load_file(cached_safe_file, device="cpu") + except EntryNotFoundError: + pass + + # Otherwise, load using pytorch.load + cached_file = hf_hub_download( + hf_model_id, + filename=filename, + revision=hf_revision, + cache_dir=cache_dir, + ) + _logger.debug(f"[{model_id}] Safe alternative not found for '{filename}'. 
Loading weights using default pytorch.") + try: + state_dict = torch.load(cached_file, map_location='cpu', weights_only=weights_only) + except TypeError: + state_dict = torch.load(cached_file, map_location='cpu') + return state_dict + + +def load_custom_from_hf( + model_id: str, + filename: str, + model: torch.nn.Module, + cache_dir: Optional[Union[str, Path]] = None, +): + assert has_hf_hub(True) + hf_model_id, hf_revision = hf_split(model_id) + cached_file = hf_hub_download( + hf_model_id, + filename=filename, + revision=hf_revision, + cache_dir=cache_dir, + ) + return model.load_pretrained(cached_file) + + +def save_config_for_hf( + model: torch.nn.Module, + config_path: str, + model_config: Optional[dict] = None, + model_args: Optional[dict] = None +): + model_config = model_config or {} + hf_config = {} + pretrained_cfg = filter_pretrained_cfg(model.pretrained_cfg, remove_source=True, remove_null=True) + # set some values at root config level + hf_config['architecture'] = pretrained_cfg.pop('architecture') + hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes) + + # NOTE these attrs are saved for informational purposes, they do not impact model build + hf_config['num_features'] = model_config.pop('num_features', model.num_features) + global_pool_type = model_config.pop('global_pool', getattr(model, 'global_pool', None)) + if isinstance(global_pool_type, str) and global_pool_type: + hf_config['global_pool'] = global_pool_type + + # Save class label info + if 'labels' in model_config: + _logger.warning( + "'labels' as a config field is deprecated. Please use 'label_names' and 'label_descriptions'." + " Renaming provided 'labels' field to 'label_names'.") + model_config.setdefault('label_names', model_config.pop('labels')) + + label_names = model_config.pop('label_names', None) + if label_names: + assert isinstance(label_names, (dict, list, tuple)) + # map label id (classifier index) -> unique label name (ie synset for ImageNet, MID for OpenImages) + # can be a dict id: name if there are id gaps, or tuple/list if no gaps. + hf_config['label_names'] = label_names + + label_descriptions = model_config.pop('label_descriptions', None) + if label_descriptions: + assert isinstance(label_descriptions, dict) + # maps label names -> descriptions + hf_config['label_descriptions'] = label_descriptions + + if model_args: + hf_config['model_args'] = model_args + + hf_config['pretrained_cfg'] = pretrained_cfg + hf_config.update(model_config) + + with config_path.open('w') as f: + json.dump(hf_config, f, indent=2) + + +def save_for_hf( + model: torch.nn.Module, + save_directory: str, + model_config: Optional[dict] = None, + model_args: Optional[dict] = None, + safe_serialization: Union[bool, Literal["both"]] = False, +): + assert has_hf_hub(True) + save_directory = Path(save_directory) + save_directory.mkdir(exist_ok=True, parents=True) + + # Save model weights, either safely (using safetensors), using the legacy pytorch approach, or both.
+ tensors = model.state_dict() + if safe_serialization is True or safe_serialization == "both": + assert _has_safetensors, "`pip install safetensors` to use .safetensors" + safetensors.torch.save_file(tensors, save_directory / HF_SAFE_WEIGHTS_NAME) + if safe_serialization is False or safe_serialization == "both": + torch.save(tensors, save_directory / HF_WEIGHTS_NAME) + + config_path = save_directory / 'config.json' + save_config_for_hf( + model, + config_path, + model_config=model_config, + model_args=model_args, + ) + + +def push_to_hf_hub( + model: torch.nn.Module, + repo_id: str, + commit_message: str = 'Add model', + token: Optional[str] = None, + revision: Optional[str] = None, + private: bool = False, + create_pr: bool = False, + model_config: Optional[dict] = None, + model_card: Optional[dict] = None, + model_args: Optional[dict] = None, + safe_serialization: Union[bool, Literal["both"]] = 'both', +): + """ + Arguments: + (...) + safe_serialization (`bool` or `"both"`, *optional*, defaults to `'both'`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + Can be set to `"both"` in order to push both safe and unsafe weights. + """ + # Create repo if it doesn't exist yet + repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True) + + # Infer complete repo_id from repo_url + # Can be different from the input `repo_id` if repo_owner was implicit + _, repo_owner, repo_name = repo_type_and_id_from_hf_id(repo_url) + repo_id = f"{repo_owner}/{repo_name}" + + # Check if a README file already exists in the repo + try: + get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename="README.md", revision=revision)) + has_readme = True + except EntryNotFoundError: + has_readme = False + + # Dump model and push to Hub + with TemporaryDirectory() as tmpdir: + # Save model weights and config.
+ save_for_hf( + model, + tmpdir, + model_config=model_config, + model_args=model_args, + safe_serialization=safe_serialization, + ) + + # Add readme if it does not exist + if not has_readme: + model_card = model_card or {} + model_name = repo_id.split('/')[-1] + readme_path = Path(tmpdir) / "README.md" + readme_text = generate_readme(model_card, model_name) + readme_path.write_text(readme_text) + + # Upload model and return + return upload_folder( + repo_id=repo_id, + folder_path=tmpdir, + revision=revision, + create_pr=create_pr, + commit_message=commit_message, + ) + + +def generate_readme(model_card: dict, model_name: str): + readme_text = "---\n" + readme_text += "tags:\n- image-classification\n- timm\n" + readme_text += "library_name: timm\n" + readme_text += f"license: {model_card.get('license', 'apache-2.0')}\n" + if 'details' in model_card and 'Dataset' in model_card['details']: + readme_text += 'datasets:\n' + if isinstance(model_card['details']['Dataset'], (tuple, list)): + for d in model_card['details']['Dataset']: + readme_text += f"- {d.lower()}\n" + else: + readme_text += f"- {model_card['details']['Dataset'].lower()}\n" + if 'Pretrain Dataset' in model_card['details']: + if isinstance(model_card['details']['Pretrain Dataset'], (tuple, list)): + for d in model_card['details']['Pretrain Dataset']: + readme_text += f"- {d.lower()}\n" + else: + readme_text += f"- {model_card['details']['Pretrain Dataset'].lower()}\n" + readme_text += "---\n" + readme_text += f"# Model card for {model_name}\n" + if 'description' in model_card: + readme_text += f"\n{model_card['description']}\n" + if 'details' in model_card: + readme_text += f"\n## Model Details\n" + for k, v in model_card['details'].items(): + if isinstance(v, (list, tuple)): + readme_text += f"- **{k}:**\n" + for vi in v: + readme_text += f" - {vi}\n" + elif isinstance(v, dict): + readme_text += f"- **{k}:**\n" + for ki, vi in v.items(): + readme_text += f" - {ki}: {vi}\n" + else: + readme_text += f"- **{k}:** {v}\n" + if 'usage' in model_card: + readme_text += f"\n## Model Usage\n" + readme_text += model_card['usage'] + readme_text += '\n' + + if 'comparison' in model_card: + readme_text += f"\n## Model Comparison\n" + readme_text += model_card['comparison'] + readme_text += '\n' + + if 'citation' in model_card: + readme_text += f"\n## Citation\n" + if not isinstance(model_card['citation'], (list, tuple)): + citations = [model_card['citation']] + else: + citations = model_card['citation'] + for c in citations: + readme_text += f"```bibtex\n{c}\n```\n" + return readme_text + + +def _get_safe_alternatives(filename: str) -> Iterable[str]: + """Returns potential safetensors alternatives for a given filename. + + Use case: + When downloading a model from the Huggingface Hub, we first look if a .safetensors file exists and if yes, we use it. + Main use case is filename "pytorch_model.bin" => check for "model.safetensors" or "pytorch_model.safetensors". 
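+
+    Editor's sketch of the behaviour implemented below:
+        list(_get_safe_alternatives("pytorch_model.bin"))           -> ["model.safetensors"]
+        list(_get_safe_alternatives("open_clip_pytorch_model.bin")) -> ["open_clip_model.safetensors"]
+        list(_get_safe_alternatives("some_custom_weights.bin"))     -> ["some_custom_weights.safetensors"]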
+ """ + if filename == HF_WEIGHTS_NAME: + yield HF_SAFE_WEIGHTS_NAME + if filename == HF_OPEN_CLIP_WEIGHTS_NAME: + yield HF_OPEN_CLIP_SAFE_WEIGHTS_NAME + if filename not in (HF_WEIGHTS_NAME, HF_OPEN_CLIP_WEIGHTS_NAME) and filename.endswith(".bin"): + yield filename[:-4] + ".safetensors" diff --git a/pytorch-image-models/timm/models/_registry.py b/pytorch-image-models/timm/models/_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..d8ec2f6439b6652eef72f7a271c1cfa8c4da6bdb --- /dev/null +++ b/pytorch-image-models/timm/models/_registry.py @@ -0,0 +1,352 @@ +""" Model Registry +Hacked together by / Copyright 2020 Ross Wightman +""" + +import fnmatch +import re +import sys +import warnings +from collections import defaultdict, deque +from copy import deepcopy +from dataclasses import replace +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple + +from ._pretrained import PretrainedCfg, DefaultCfg + +__all__ = [ + 'split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs', + 'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', + 'get_pretrained_cfg_value', 'is_model_pretrained', 'get_arch_pretrained_cfgs' +] + +_module_to_models: Dict[str, Set[str]] = defaultdict(set) # dict of sets to check membership of model in module +_model_to_module: Dict[str, str] = {} # mapping of model names to module names +_model_entrypoints: Dict[str, Callable[..., Any]] = {} # mapping of model names to architecture entrypoint fns +_model_has_pretrained: Set[str] = set() # set of model names that have pretrained weight url present +_model_default_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch -> default cfg objects +_model_pretrained_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch.tag -> pretrained cfgs +_model_with_tags: Dict[str, List[str]] = defaultdict(list) # shortcut to map each model arch to all model + tag names +_module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict) +_deprecated_models: Dict[str, Optional[str]] = {} + + +def split_model_name_tag(model_name: str, no_tag: str = '') -> Tuple[str, str]: + model_name, *tag_list = model_name.split('.', 1) + tag = tag_list[0] if tag_list else no_tag + return model_name, tag + + +def get_arch_name(model_name: str) -> str: + return split_model_name_tag(model_name)[0] + + +def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]): + out = defaultdict(DefaultCfg) + default_set = set() # no tag and tags ending with * are prioritized as default + + for k, v in cfgs.items(): + if isinstance(v, dict): + v = PretrainedCfg(**v) + has_weights = v.has_weights + + model, tag = split_model_name_tag(k) + is_default_set = model in default_set + priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set) + tag = tag.strip('*') + + default_cfg = out[model] + + if priority: + default_cfg.tags.appendleft(tag) + default_set.add(model) + elif has_weights and not default_cfg.is_pretrained: + default_cfg.tags.appendleft(tag) + else: + default_cfg.tags.append(tag) + + if has_weights: + default_cfg.is_pretrained = True + + default_cfg.cfgs[tag] = v + + return out + + +def register_model(fn: Callable[..., Any]) -> Callable[..., Any]: + # lookup containing module + mod = sys.modules[fn.__module__] + module_name_split = fn.__module__.split('.') + module_name = module_name_split[-1] if len(module_name_split) else '' + + # add model to 
__all__ in module + model_name = fn.__name__ + if hasattr(mod, '__all__'): + mod.__all__.append(model_name) + else: + mod.__all__ = [model_name] # type: ignore + + # add entries to registry dict/sets + if model_name in _model_entrypoints: + warnings.warn( + f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. This is because the name being ' + 'registered conflicts with an existing name. Please check if this is not expected.', + stacklevel=2, + ) + _model_entrypoints[model_name] = fn + _model_to_module[model_name] = module_name + _module_to_models[module_name].add(model_name) + if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs: + # this will catch all models that have entrypoint matching cfg key, but miss any aliasing + # entrypoints or non-matching combos + default_cfg = mod.default_cfgs[model_name] + if not isinstance(default_cfg, DefaultCfg): + # new style default cfg dataclass w/ multiple entries per model-arch + assert isinstance(default_cfg, dict) + # old style cfg dict per model-arch + pretrained_cfg = PretrainedCfg(**default_cfg) + default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg}) + + for tag_idx, tag in enumerate(default_cfg.tags): + is_default = tag_idx == 0 + pretrained_cfg = default_cfg.cfgs[tag] + model_name_tag = '.'.join([model_name, tag]) if tag else model_name + replace_items = dict(architecture=model_name, tag=tag if tag else None) + if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/': + # auto-complete hub name w/ architecture.tag + replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag + pretrained_cfg = replace(pretrained_cfg, **replace_items) + + if is_default: + _model_pretrained_cfgs[model_name] = pretrained_cfg + if pretrained_cfg.has_weights: + # add tagless entry if it's default and has weights + _model_has_pretrained.add(model_name) + + if tag: + _model_pretrained_cfgs[model_name_tag] = pretrained_cfg + if pretrained_cfg.has_weights: + # add model w/ tag if tag is valid + _model_has_pretrained.add(model_name_tag) + _model_with_tags[model_name].append(model_name_tag) + else: + _model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances) + + _model_default_cfgs[model_name] = default_cfg + + return fn + + +def _deprecated_model_shim(deprecated_name: str, current_fn: Callable = None, current_tag: str = ''): + def _fn(pretrained=False, **kwargs): + assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.' 
+ current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__ + warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2) + pretrained_cfg = kwargs.pop('pretrained_cfg', None) + return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs) + return _fn + + +def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]): + mod = sys.modules[module_name] + module_name_split = module_name.split('.') + module_name = module_name_split[-1] if len(module_name_split) else '' + + for deprecated, current in deprecation_map.items(): + if hasattr(mod, '__all__'): + mod.__all__.append(deprecated) + current_fn = None + current_tag = '' + if current: + current_name, current_tag = split_model_name_tag(current) + current_fn = getattr(mod, current_name) + deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag) + setattr(mod, deprecated, deprecated_entrypoint_fn) + _model_entrypoints[deprecated] = deprecated_entrypoint_fn + _model_to_module[deprecated] = module_name + _module_to_models[module_name].add(deprecated) + _deprecated_models[deprecated] = current + _module_to_deprecated_models[module_name][deprecated] = current + + +def _natural_key(string_: str) -> List[Union[int, str]]: + """See https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/""" + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def _expand_filter(filter: str): + """ expand a 'base_filter' to 'base_filter.*' if no tag portion""" + filter_base, filter_tag = split_model_name_tag(filter) + if not filter_tag: + return ['.'.join([filter_base, '*']), filter] + else: + return [filter] + + +def list_models( + filter: Union[str, List[str]] = '', + module: Union[str, List[str]] = '', + pretrained: bool = False, + exclude_filters: Union[str, List[str]] = '', + name_matches_cfg: bool = False, + include_tags: Optional[bool] = None, +) -> List[str]: + """ Return list of available model names, sorted alphabetically + + Args: + filter - Wildcard filter string that works with fnmatch + module - Limit model selection to a specific submodule (ie 'vision_transformer') + pretrained - Include only models with valid pretrained weights if True + exclude_filters - Wildcard filters to exclude models after including them with filter + name_matches_cfg - Include only models w/ model_name matching default_cfg name (excludes some aliases) + include_tags - Include pretrained tags in model names (model.tag). If None, defaults to True + when pretrained=True, else False (default: None) + + Returns: + models - The sorted list of models + + Example: + list_models('gluon_resnet*') -- returns all models starting with 'gluon_resnet' + list_models('*resnext*', 'resnet') -- returns all models with 'resnext' in the 'resnet' module + """ + if filter: + include_filters = filter if isinstance(filter, (tuple, list)) else [filter] + else: + include_filters = [] + + if include_tags is None: + # FIXME should this be default behaviour? or default to include_tags=True?
+ include_tags = pretrained + + if not module: + all_models: Set[str] = set(_model_entrypoints.keys()) + else: + if isinstance(module, str): + all_models: Set[str] = _module_to_models[module] + else: + assert isinstance(module, Sequence) + all_models: Set[str] = set() + for m in module: + all_models.update(_module_to_models[m]) + all_models = all_models - _deprecated_models.keys() # remove deprecated models from listings + + if include_tags: + # expand model names to include names w/ pretrained tags + models_with_tags: Set[str] = set() + for m in all_models: + models_with_tags.update(_model_with_tags[m]) + all_models = models_with_tags + # expand include and exclude filters to include a '.*' for proper match if no tags in filter + include_filters = [ef for f in include_filters for ef in _expand_filter(f)] + exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)] + + if include_filters: + models: Set[str] = set() + for f in include_filters: + include_models = fnmatch.filter(all_models, f) # include these models + if len(include_models): + models = models.union(include_models) + else: + models = all_models + + if exclude_filters: + if not isinstance(exclude_filters, (tuple, list)): + exclude_filters = [exclude_filters] + for xf in exclude_filters: + exclude_models = fnmatch.filter(models, xf) # exclude these models + if len(exclude_models): + models = models.difference(exclude_models) + + if pretrained: + models = _model_has_pretrained.intersection(models) + + if name_matches_cfg: + models = set(_model_pretrained_cfgs).intersection(models) + + return sorted(models, key=_natural_key) + + +def list_pretrained( + filter: Union[str, List[str]] = '', + exclude_filters: str = '', +) -> List[str]: + return list_models( + filter=filter, + pretrained=True, + exclude_filters=exclude_filters, + include_tags=True, + ) + + +def get_deprecated_models(module: str = '') -> Dict[str, str]: + all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models + return deepcopy(all_deprecated) + + +def is_model(model_name: str) -> bool: + """ Check if a model name exists + """ + arch_name = get_arch_name(model_name) + return arch_name in _model_entrypoints + + +def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]: + """Fetch a model entrypoint for specified model name + """ + arch_name = get_arch_name(model_name) + if module_filter and arch_name not in _module_to_models.get(module_filter, {}): + raise RuntimeError(f'Model ({model_name} not found in module {module_filter}.') + return _model_entrypoints[arch_name] + + +def list_modules() -> List[str]: + """ Return list of module names that contain models / model entrypoints + """ + modules = _module_to_models.keys() + return sorted(modules) + + +def is_model_in_modules( + model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]] +) -> bool: + """Check if a model exists within a subset of modules + + Args: + model_name - name of model to check + module_names - names of modules to search in + """ + arch_name = get_arch_name(model_name) + assert isinstance(module_names, (tuple, list, set)) + return any(arch_name in _module_to_models[n] for n in module_names) + + +def is_model_pretrained(model_name: str) -> bool: + return model_name in _model_has_pretrained + + +def get_pretrained_cfg(model_name: str, allow_unregistered: bool = True) -> Optional[PretrainedCfg]: + if model_name in _model_pretrained_cfgs: + return deepcopy(_model_pretrained_cfgs[model_name]) + 
arch_name, tag = split_model_name_tag(model_name) + if arch_name in _model_default_cfgs: + # if model arch exists, but the tag is wrong, error out + raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.') + if allow_unregistered: + # if model arch doesn't exist, it has no pretrained_cfg registered, allow a default to be created + return None + raise RuntimeError(f'Model architecture ({arch_name}) has no pretrained cfg registered.') + + +def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]: + """ Get a specific model default_cfg value by key. None if key doesn't exist. + """ + cfg = get_pretrained_cfg(model_name, allow_unregistered=False) + return getattr(cfg, cfg_key, None) + + +def get_arch_pretrained_cfgs(model_name: str) -> Dict[str, PretrainedCfg]: + """ Get all pretrained cfgs for a given architecture. + """ + arch_name, _ = split_model_name_tag(model_name) + model_names = _model_with_tags[arch_name] + cfgs = {m: _model_pretrained_cfgs[m] for m in model_names} + return cfgs diff --git a/pytorch-image-models/timm/models/beit.py b/pytorch-image-models/timm/models/beit.py new file mode 100644 index 0000000000000000000000000000000000000000..c47ea395e845038632909c84b8a433fc4fb0c04a --- /dev/null +++ b/pytorch-image-models/timm/models/beit.py @@ -0,0 +1,716 @@ +""" BEiT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) + +Model from official source: https://github.com/microsoft/unilm/tree/master/beit + +@inproceedings{beit, +title={{BEiT}: {BERT} Pre-Training of Image Transformers}, +author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei}, +booktitle={International Conference on Learning Representations}, +year={2022}, +url={https://openreview.net/forum?id=p-BhZSz59o4} +} + +BEiT-v2 from https://github.com/microsoft/unilm/tree/master/beit2 + +@article{beitv2, +title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers}, +author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei}, +year={2022}, +eprint={2208.06366}, +archivePrefix={arXiv}, +primaryClass={cs.CV} +} + +At this point only the 1k fine-tuned classification weights and model configs have been added, +see original source above for pre-training models and procedure. 
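+
+Usage sketch (editor's addition, not from the original source; weights are only downloaded when
+pretrained=True):
+
+    >>> import timm, torch
+    >>> model = timm.create_model('beit_base_patch16_224', pretrained=False)
+    >>> model(torch.randn(1, 3, 224, 224)).shape
+    torch.Size([1, 1000])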
+ +Modifications by / Copyright 2021 Ross Wightman, original copyrights below +""" +# -------------------------------------------------------- +# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) +# Github source: https://github.com/microsoft/unilm/tree/master/beit +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# By Hangbo Bao +# Based on timm and DeiT code bases +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit/ +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' + +import math +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, SwiGLU, LayerNorm, DropPath, trunc_normal_, use_fused_attn +from timm.layers import resample_patch_embed, resample_abs_pos_embed, resize_rel_pos_bias_table, ndgrid + + +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._registry import generate_default_cfgs, register_model + +__all__ = ['Beit'] + + +def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor: + num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + # cls to token & token 2 cls & cls to cls + # get pair-wise relative position index for each token inside the window + window_area = window_size[0] * window_size[1] + coords = torch.stack(ndgrid(torch.arange(window_size[0]), torch.arange(window_size[1]))) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = num_relative_distance - 3 + relative_position_index[0:, 0] = num_relative_distance - 2 + relative_position_index[0, 0] = num_relative_distance - 1 + return relative_position_index + + +class Attention(nn.Module): + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + qkv_bias_separate: bool = False, + attn_drop: float = 0., + proj_drop: float = 0., + window_size: Optional[Tuple[int, int]] = None, + attn_head_dim: Optional[int] = None, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + self.qkv_bias_separate = qkv_bias_separate + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.k_bias = None + self.v_bias = None + + if window_size: + self.window_size = 
window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + self.register_buffer("relative_position_index", gen_relative_position_index(window_size), persistent=False) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def _get_rel_pos_bias(self): + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + return relative_position_bias.unsqueeze(0) + + def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None): + B, N, C = x.shape + + if self.q_bias is None: + qkv = self.qkv(x) + else: + qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) + if self.qkv_bias_separate: + qkv = self.qkv(x) + qkv += qkv_bias + else: + qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim + + if self.fused_attn: + rel_pos_bias = None + if self.relative_position_bias_table is not None: + rel_pos_bias = self._get_rel_pos_bias() + if shared_rel_pos_bias is not None: + rel_pos_bias = rel_pos_bias + shared_rel_pos_bias + elif shared_rel_pos_bias is not None: + rel_pos_bias = shared_rel_pos_bias + + x = F.scaled_dot_product_attention( + q, k, v, + attn_mask=rel_pos_bias, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + attn = attn + self._get_rel_pos_bias() + if shared_rel_pos_bias is not None: + attn = attn + shared_rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, + dim: int, + num_heads: int, + qkv_bias: bool = False, + mlp_ratio: float = 4., + scale_mlp: bool = False, + swiglu_mlp: bool = False, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + init_values: Optional[float] = None, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + window_size: Optional[Tuple[int, int]] = None, + attn_head_dim: Optional[int] = None, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + window_size=window_size, + attn_head_dim=attn_head_dim, + ) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm2 = norm_layer(dim) + if swiglu_mlp: + self.mlp = SwiGLU( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + norm_layer=norm_layer if scale_mlp else None, + drop=proj_drop, + ) + else: + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + norm_layer=norm_layer if scale_mlp else None, + drop=proj_drop, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + if init_values: + self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) + self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None): + if self.gamma_1 is None: + x = x + self.drop_path1(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.window_area = window_size[0] * window_size[1] + num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) + # trunc_normal_(self.relative_position_bias_table, std=.02) + self.register_buffer("relative_position_index", gen_relative_position_index(window_size)) + + def forward(self): + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_area + 1, self.window_area + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class Beit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, + img_size: Union[int, Tuple[int, int]] = 224, + patch_size: Union[int, Tuple[int, int]] = 16, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + qkv_bias: bool = True, + mlp_ratio: float = 4., + swiglu_mlp: bool = False, + scale_mlp: bool = False, + drop_rate: float = 0., + pos_drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + norm_layer: Callable = LayerNorm, + init_values: Optional[float] = None, + use_abs_pos_emb: bool = True, + use_rel_pos_bias: bool = False, + use_shared_rel_pos_bias: bool = False, + head_init_scale: float = 0.001, + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.num_prefix_tokens = 1 + self.grad_checkpointing = False + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + ) + num_patches = self.patch_embed.num_patches + r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None + self.pos_drop = 
nn.Dropout(p=pos_drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias( + window_size=self.patch_embed.grid_size, + num_heads=num_heads, + ) + else: + self.rel_pos_bias = None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + mlp_ratio=mlp_ratio, + scale_mlp=scale_mlp, + swiglu_mlp=swiglu_mlp, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + window_size=self.patch_embed.grid_size if use_rel_pos_bias else None, + ) + for i in range(depth)]) + self.feature_info = [ + dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] + + use_fc_norm = self.global_pool == 'avg' + self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + + self.fix_init_weight() + if isinstance(self.head, nn.Linear): + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(head_init_scale) + self.head.bias.data.mul_(head_init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + nwd = {'pos_embed', 'cls_token'} + for n, _ in self.named_parameters(): + if 'relative_position_bias_table' in n: + nwd.add(n) + return nwd + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^cls_token|pos_embed|patch_embed|rel_pos_bias', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))], + ) + return matcher + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + return_prefix_tokens: bool = False, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. 
+ + Args: + x: Input image tensor + indices: Take last n blocks if an int, if is a sequence, select by matching indices + return_prefix_tokens: Return both prefix and spatial intermediate tokens + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' + reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass + B, _, height, width = x.shape + x = self.patch_embed(x) + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x, shared_rel_pos_bias=rel_pos_bias) + if i in take_indices: + # normalize intermediates with final norm layer if enabled + intermediates.append(self.norm(x) if norm else x) + + # process intermediates + if self.num_prefix_tokens: + # split prefix (e.g. class, distill) and spatial feature tokens + prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] + intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] + if reshape: + # reshape to BCHW output format + H, W = self.patch_embed.dynamic_feat_size((height, width)) + intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] + if not torch.jit.is_scripting() and return_prefix_tokens: + # return_prefix not support in torchscript due to poor type handling + intermediates = list(zip(intermediates, prefix_tokens)) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
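+
+        Editor's sketch (illustrative, for a 12-block model; not part of the original docstring):
+            model.prune_intermediate_layers((2, 5, 8), prune_head=True)
+            # keeps blocks 0..8, swaps fc_norm/head for nn.Identity(), and returns the resolved indices (2, 5, 8)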
+ """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.fc_norm = nn.Identity() + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x) + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias) + else: + x = blk(x, shared_rel_pos_bias=rel_pos_bias) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.fc_norm(x) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'beit_base_patch16_224.in22k_ft_in22k_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth', + hf_hub_id='timm/'), + 'beit_base_patch16_384.in22k_ft_in22k_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_base_patch16_224.in22k_ft_in22k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth', + hf_hub_id='timm/', + num_classes=21841, + ), + 'beit_large_patch16_224.in22k_ft_in22k_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth', + hf_hub_id='timm/'), + 'beit_large_patch16_384.in22k_ft_in22k_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_large_patch16_512.in22k_ft_in22k_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth', + hf_hub_id='timm/', + input_size=(3, 512, 512), crop_pct=1.0, + ), + 'beit_large_patch16_224.in22k_ft_in22k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth', + hf_hub_id='timm/', + num_classes=21841, + ), + + 'beitv2_base_patch16_224.in1k_ft_in22k_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth', + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_base_patch16_224.in1k_ft_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft1k.pth', + hf_hub_id='timm/', + 
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_base_patch16_224.in1k_ft_in22k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21k.pth', + hf_hub_id='timm/', + num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_large_patch16_224.in1k_ft_in22k_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21kto1k.pth', + hf_hub_id='timm/', + crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_large_patch16_224.in1k_ft_in1k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft1k.pth', + hf_hub_id='timm/', + crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_large_patch16_224.in1k_ft_in22k': _cfg( + #url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth', + hf_hub_id='timm/', + num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), +}) + + +def checkpoint_filter_fn(state_dict, model, interpolation='bicubic', antialias=True): + state_dict = state_dict.get('model', state_dict) + state_dict = state_dict.get('module', state_dict) + # beit v2 didn't strip module + + out_dict = {} + for k, v in state_dict.items(): + if 'relative_position_index' in k: + continue + if 'patch_embed.proj.weight' in k: + O, I, H, W = model.patch_embed.proj.weight.shape + if v.shape[-1] != W or v.shape[-2] != H: + v = resample_patch_embed( + v, + (H, W), + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: + # To resize pos embedding when using model at different size from pretrained weights + num_prefix_tokens = 1 + v = resample_abs_pos_embed( + v, + new_size=model.patch_embed.grid_size, + num_prefix_tokens=num_prefix_tokens, + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + elif k.endswith('relative_position_bias_table'): + m = model.get_submodule(k[:-29]) + if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: + v = resize_rel_pos_bias_table( + v, + new_window_size=m.window_size, + new_bias_shape=m.relative_position_bias_table.shape, + ) + out_dict[k] = v + return out_dict + + +def _create_beit(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + model = build_model_with_cfg( + Beit, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +@register_model +def beit_base_patch16_224(pretrained=False, **kwargs) -> Beit: + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1) + model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def beit_base_patch16_384(pretrained=False, **kwargs) -> Beit: + model_args = dict( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1) + model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def beit_large_patch16_224(pretrained=False, **kwargs) -> Beit: + model_args = 
dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5) + model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def beit_large_patch16_384(pretrained=False, **kwargs) -> Beit: + model_args = dict( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5) + model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def beit_large_patch16_512(pretrained=False, **kwargs) -> Beit: + model_args = dict( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5) + model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def beitv2_base_patch16_224(pretrained=False, **kwargs) -> Beit: + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5) + model = _create_beit('beitv2_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def beitv2_large_patch16_224(pretrained=False, **kwargs) -> Beit: + model_args = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5) + model = _create_beit('beitv2_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/byoanet.py b/pytorch-image-models/timm/models/byoanet.py new file mode 100644 index 0000000000000000000000000000000000000000..683ed0ca0177fbb4afa4913856bb638b212ad6e0 --- /dev/null +++ b/pytorch-image-models/timm/models/byoanet.py @@ -0,0 +1,455 @@ +""" Bring-Your-Own-Attention Network + +A flexible network w/ dataclass based config for stacking NN blocks including +self-attention (or similar) layers. + +Currently used to implement experimental variants of: + * Bottleneck Transformers + * Lambda ResNets + * HaloNets + +Consider all of the models definitions here as experimental WIP and likely to change. + +Hacked together by / copyright Ross Wightman, 2021. 
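+
+Usage sketch (illustrative addition, not part of the original module docstring): every variant
+defined below is registered via @register_model, so it can be created by name through timm, e.g.
+
+    import timm
+    model = timm.create_model('halonet26t', pretrained=False).eval()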
+""" +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs +from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks + +__all__ = [] + + +model_cfgs = dict( + + botnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + sebotnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + eca_botnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + act_layer='silu', + attn_layer='eca', + self_attn_layer='bottleneck', + self_attn_kwargs=dict(dim_head=16) + ), + + halonet_h1=ByoModelCfg( + blocks=( + ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0), + ), + stem_chs=64, + stem_type='7x7', + stem_pool='maxpool', + + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3), + ), + halonet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2) + ), + sehalonet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + 
ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + halonet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + eca_halonext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) + ), + + lambda_resnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet26rpt_256=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=None) + ), + + # experimental + haloregnetz_b=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + interleave_blocks(types=('bottle', 'self_attn'), every=3, d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg('self_attn', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=7, halo_size=2, qk_ratio=0.33) + ), + + # experimental + lamhalobotnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, 
s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='lambda', self_attn_kwargs=dict(r=13)), + interleave_blocks( + types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, + self_attn_layer='bottleneck', self_attn_kwargs=dict()), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + ), + halo2botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, + self_attn_layer='bottleneck', self_attn_kwargs=dict()), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + ), +) + + +def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + 'fixed_input_size': False, 'min_input_size': (3, 224, 224), + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # GPU-Efficient (ResNet) weights + 'botnet26t_256.c1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth', + hf_hub_id='timm/', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'sebotnet33ts_256.a1h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sebotnet33ts_a1h2_256-957e3c3e.pth', + hf_hub_id='timm/', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'botnet50ts_256.untrained': _cfg( + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'eca_botnext26ts_256.c1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_c_256-95a898f6.pth', + hf_hub_id='timm/', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + + 'halonet_h1.untrained': _cfg(input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'halonet26t.a1h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_a1h_256-3083328c.pth', + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'sehalonet33ts.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'halonet50ts.a1h_in1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_a1h2_256-f3a3daee.pth', + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'eca_halonext26ts.c1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_c_256-06906299.pth', + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + + 'lambda_resnet26t.c1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_c_256-e5a5c857.pth', + hf_hub_id='timm/', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'lambda_resnet50ts.a1h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet50ts_a1h_256-b87370f7.pth', + hf_hub_id='timm/', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), + 'lambda_resnet26rpt_256.c1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_c_256-ab00292d.pth', + hf_hub_id='timm/', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + + 'haloregnetz_b.ra3_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/haloregnetz_c_raa_256-c8ad7616.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + first_conv='stem.conv', input_size=(3, 224, 224), pool_size=(7, 7), min_input_size=(3, 224, 224), crop_pct=0.94), + + 'lamhalobotnet50ts_256.a1h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lamhalobotnet50ts_a1h2_256-fe3d9445.pth', + hf_hub_id='timm/', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'halo2botnet50ts_256.a1h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halo2botnet50ts_a1h2_256-fd9c11a3.pth', + hf_hub_id='timm/', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), +}) + + +@register_model +def botnet26t_256(pretrained=False, **kwargs) -> ByobNet: + """ Bottleneck Transformer w/ ResNet26-T backbone. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sebotnet33ts_256(pretrained=False, **kwargs) -> ByobNet: + """ Bottleneck Transformer w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, + """ + return _create_byoanet('sebotnet33ts_256', 'sebotnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def botnet50ts_256(pretrained=False, **kwargs) -> ByobNet: + """ Bottleneck Transformer w/ ResNet50-T backbone, silu act. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_botnext26ts_256(pretrained=False, **kwargs) -> ByobNet: + """ Bottleneck Transformer w/ ResNet26-T backbone, silu act. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet_h1(pretrained=False, **kwargs) -> ByobNet: + """ HaloNet-H1. Halo attention in all stages as per the paper. + NOTE: This runs very slowly! 
+ """ + return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) + + +@register_model +def halonet26t(pretrained=False, **kwargs) -> ByobNet: + """ HaloNet w/ a ResNet26-t backbone. Halo attention in final two stages + """ + return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sehalonet33ts(pretrained=False, **kwargs) -> ByobNet: + """ HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4. + """ + return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet50ts(pretrained=False, **kwargs) -> ByobNet: + """ HaloNet w/ a ResNet50-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_halonext26ts(pretrained=False, **kwargs) -> ByobNet: + """ HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26t(pretrained=False, **kwargs) -> ByobNet: + """ Lambda-ResNet-26-T. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet50ts(pretrained=False, **kwargs) -> ByobNet: + """ Lambda-ResNet-50-TS. SiLU act. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26rpt_256(pretrained=False, **kwargs) -> ByobNet: + """ Lambda-ResNet-26-R-T. Lambda layers w/ rel pos embed in last two stages. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs) + + +@register_model +def haloregnetz_b(pretrained=False, **kwargs) -> ByobNet: + """ Halo + RegNetZ + """ + return _create_byoanet('haloregnetz_b', pretrained=pretrained, **kwargs) + + +@register_model +def lamhalobotnet50ts_256(pretrained=False, **kwargs) -> ByobNet: + """ Combo Attention (Lambda + Halo + Bot) Network + """ + return _create_byoanet('lamhalobotnet50ts_256', 'lamhalobotnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def halo2botnet50ts_256(pretrained=False, **kwargs) -> ByobNet: + """ Combo Attention (Halo + Halo + Bot) Network + """ + return _create_byoanet('halo2botnet50ts_256', 'halo2botnet50ts', pretrained=pretrained, **kwargs) diff --git a/pytorch-image-models/timm/models/byobnet.py b/pytorch-image-models/timm/models/byobnet.py new file mode 100644 index 0000000000000000000000000000000000000000..93ca5e72c913780f1ec2972c5a920839d4d40dc2 --- /dev/null +++ b/pytorch-image-models/timm/models/byobnet.py @@ -0,0 +1,2784 @@ +""" Bring-Your-Own-Blocks Network + +A flexible network w/ dataclass based config for stacking those NN blocks. + +This model is currently used to implement the following networks: + +GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). 
+Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 +Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 + +RepVGG - repvgg_* +Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 +Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT + +MobileOne - mobileone_* +Paper: `MobileOne: An Improved One millisecond Mobile Backbone` - https://arxiv.org/abs/2206.04040 +Code and weights: https://github.com/apple/ml-mobileone, licensed MIT + +In all cases the models have been modified to fit within the design of ByobNet. I've remapped +the original weights and verified accuracies. + +For GPU Efficient nets, I used the original names for the blocks since they were for the most part +the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some +changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. + +A significant number of different network archs can be implemented here, including variants of the +above nets that include attention. + +Hacked together by / copyright Ross Wightman, 2021. +""" +import math +from dataclasses import dataclass, field, replace +from functools import partial +from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD +from timm.layers import ( + ClassifierHead, NormMlpClassifierHead, ConvNormAct, BatchNormAct2d, EvoNorm2dS0a, + AttentionPool2d, RotAttentionPool2d, DropPath, AvgPool2dSame, + create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple, +) +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import named_apply, checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] + + +@dataclass +class ByoBlockCfg: + type: Union[str, nn.Module] + d: int # block depth (number of block repeats in stage) + c: int # number of output channels for each block in stage + s: int = 2 # stride of stage (first block) + gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 + br: float = 1. # bottleneck-ratio of blocks in stage + + # NOTE: these config items override the model cfgs that are applied to all blocks by default + attn_layer: Optional[str] = None + attn_kwargs: Optional[Dict[str, Any]] = None + self_attn_layer: Optional[str] = None + self_attn_kwargs: Optional[Dict[str, Any]] = None + block_kwargs: Optional[Dict[str, Any]] = None + + +@dataclass +class ByoModelCfg: + blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] 
+ downsample: str = 'conv1x1' + stem_type: str = '3x3' + stem_pool: Optional[str] = 'maxpool' + stem_chs: Union[int, List[int], Tuple[int, ...]] = 32 + width_factor: float = 1.0 + num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 + zero_init_last: bool = True # zero init last weight (usually bn) in residual path + fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation + + # layer config + act_layer: str = 'relu' + norm_layer: str = 'batchnorm' + aa_layer: str = '' + + # Head config + head_hidden_size: Optional[int] = None # feat dim of MLP head or AttentionPool output + head_type: str = 'classifier' + + # Block config + # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there + attn_layer: Optional[str] = None + attn_kwargs: dict = field(default_factory=lambda: dict()) + self_attn_layer: Optional[str] = None + self_attn_kwargs: dict = field(default_factory=lambda: dict()) + block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) + + +def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0): + c = (64, 128, 256, 512) + group_size = 0 + if groups > 0: + group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 + bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) + return bcfg + + +def _mobileone_bcfg(d=(2, 8, 10, 1), wf=(1., 1., 1., 1.), se_blocks=(), num_conv_branches=1): + c = (64, 128, 256, 512) + prev_c = min(64, c[0] * wf[0]) + se_blocks = se_blocks or (0,) * len(d) + bcfg = [] + for d, c, w, se in zip(d, c, wf, se_blocks): + scfg = [] + for i in range(d): + out_c = c * w + bk = dict(num_conv_branches=num_conv_branches) + ak = {} + if i >= d - se: + ak['attn_layer'] = 'se' + scfg += [ByoBlockCfg(type='one', d=1, c=prev_c, gs=1, block_kwargs=bk, **ak)] # depthwise block + scfg += [ByoBlockCfg( + type='one', d=1, c=out_c, gs=0, block_kwargs=dict(kernel_size=1, **bk), **ak)] # pointwise block + prev_c = out_c + bcfg += [scfg] + return bcfg + + +def interleave_blocks( + types: Tuple[str, str], d, + every: Union[int, List[int]] = 1, + first: bool = False, + **kwargs, +) -> Tuple[ByoBlockCfg]: + """ interleave 2 block types in stack + """ + assert len(types) == 2 + if isinstance(every, int): + every = list(range(0 if first else every, d, every + 1)) + if not every: + every = [d - 1] + set(every) + blocks = [] + for i in range(d): + block_type = types[1] if i in every else types[0] + blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] + return tuple(blocks) + + +def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: + if not isinstance(stage_blocks_cfg, Sequence): + stage_blocks_cfg = (stage_blocks_cfg,) + block_cfgs = [] + for i, cfg in enumerate(stage_blocks_cfg): + block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] + return block_cfgs + + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +@dataclass +class LayerFn: + conv_norm_act: Callable = ConvNormAct + norm_act: Callable = BatchNormAct2d + act: Callable = nn.ReLU + attn: Optional[Callable] = None + self_attn: Optional[Callable] = None + + +class DownsampleAvg(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 1, + dilation: int = 1, + apply_act: bool = 
False, + layers: LayerFn = None, + ): + """ AvgPool Downsampling as in 'D' ResNet variants.""" + super(DownsampleAvg, self).__init__() + layers = layers or LayerFn() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) + + def forward(self, x): + return self.conv(self.pool(x)) + + +def create_shortcut( + downsample_type: str, + in_chs: int, + out_chs: int, + stride: int, + dilation: Tuple[int, int], + layers: LayerFn, + **kwargs, +): + assert downsample_type in ('avg', 'conv1x1', '') + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + if not downsample_type: + return None # no shortcut + elif downsample_type == 'avg': + return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) + else: + return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) + else: + return nn.Identity() # identity shortcut + + +class BasicBlock(nn.Module): + """ ResNet Basic Block - kxk + kxk + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + group_size: Optional[int] = None, + bottle_ratio: float = 1.0, + downsample: str = 'avg', + attn_last: bool = True, + linear_out: bool = False, + layers: LayerFn = None, + drop_block: Callable = None, + drop_path_rate: float = 0., + ): + super(BasicBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs, out_chs, + stride=stride, dilation=dilation, apply_act=False, layers=layers, + ) + + self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, + dilation=dilation[1], groups=groups, drop_layer=drop_block, apply_act=False, + ) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_kxk(x) + x = self.attn(x) + x = self.conv2_kxk(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class BottleneckBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1 + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + bottle_ratio: float = 1., + group_size: Optional[int] = None, + downsample: str = 'avg', + attn_last: bool = False, + linear_out: bool = False, + extra_conv: bool = False, + bottle_in: bool = False, + layers: LayerFn = None, + drop_block: Callable = None, + drop_path_rate: float = 0., + ): + super(BottleneckBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs, out_chs, + stride=stride, dilation=dilation, apply_act=False, layers=layers, + ) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, + stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, + ) + if extra_conv: + self.conv2b_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups) + else: + self.conv2b_kxk = nn.Identity() + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: + nn.init.zeros_(self.conv3_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.conv2b_kxk(x) + x = self.attn(x) + x = self.conv3_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class DarkBlock(nn.Module): + """ DarkNet-like (1x1 + 3x3 w/ stride) block + + The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models. + This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet + uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats). + + If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1) + for more optimal compute. 
+ """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + bottle_ratio: float = 1.0, + group_size: Optional[int] = None, + downsample: str = 'avg', + attn_last: bool = True, + linear_out: bool = False, + layers: LayerFn = None, + drop_block: Callable = None, + drop_path_rate: float = 0., + ): + super(DarkBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs, out_chs, + stride=stride, dilation=dilation, apply_act=False, layers=layers, + ) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, + stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, + ) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.attn(x) + x = self.conv2_kxk(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class EdgeBlock(nn.Module): + """ EdgeResidual-like (3x3 + 1x1) block + + A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed. + Very similar to the EfficientNet Edge-Residual block but this block it ends with activations, is + intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs. + + FIXME is there a more common 3x3 + 1x1 conv block to name this after? + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + bottle_ratio: float = 1.0, + group_size: Optional[int] = None, + downsample: str = 'avg', + attn_last: bool = False, + linear_out: bool = False, + layers: LayerFn = None, + drop_block: Callable = None, + drop_path_rate: float = 0., + ): + super(EdgeBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs, out_chs, + stride=stride, dilation=dilation, apply_act=False, layers=layers, + ) + self.conv1_kxk = layers.conv_norm_act( + in_chs, mid_chs, kernel_size, + stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, + ) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None and getattr(self.conv2_1x1.bn, 'weight', None) is not None: + nn.init.zeros_(self.conv2_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_kxk(x) + x = self.attn(x) + x = self.conv2_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class RepVggBlock(nn.Module): + """ RepVGG Block. + + Adapted from impl at https://github.com/DingXiaoH/RepVGG + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + bottle_ratio: float = 1.0, + group_size: Optional[int] = None, + downsample: str = '', + layers: LayerFn = None, + drop_block: Callable = None, + drop_path_rate: float = 0., + inference_mode: bool = False + ): + super(RepVggBlock, self).__init__() + self.groups = groups = num_groups(group_size, in_chs) + layers = layers or LayerFn() + + if inference_mode: + self.reparam_conv = nn.Conv2d( + in_channels=in_chs, + out_channels=out_chs, + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + groups=groups, + bias=True, + ) + else: + self.reparam_conv = None + use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] + self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None + self.conv_kxk = layers.conv_norm_act( + in_chs, out_chs, kernel_size, + stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, + ) + self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() + + self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) + self.act = layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + # NOTE this init overrides that base model init with specific changes for the block type + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + nn.init.normal_(m.weight, .1, .1) + nn.init.normal_(m.bias, 0, .1) + if hasattr(self.attn, 'reset_parameters'): + self.attn.reset_parameters() + + def forward(self, x): + if self.reparam_conv is not None: + return self.act(self.attn(self.reparam_conv(x))) + + if self.identity is None: + x = self.conv_1x1(x) + self.conv_kxk(x) + else: + identity = self.identity(x) + x = self.conv_1x1(x) + self.conv_kxk(x) + x = self.drop_path(x) # not in the paper / official impl, experimental + x += identity + x = self.attn(x) # no attn in the paper / official impl, experimental + return self.act(x) + + def reparameterize(self): + """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - + https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched + architecture used at training time to obtain a plain CNN-like structure + for inference. 
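+
+        Usage sketch (illustrative addition, not part of the original docstring): call once
+        after training / before export; forward passes then use only `reparam_conv`:
+
+            block.reparameterize()
+            y = block(x)  # fused path: reparam_conv -> attn -> act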
+ """ + if self.reparam_conv is not None: + return + + kernel, bias = self._get_kernel_bias() + self.reparam_conv = nn.Conv2d( + in_channels=self.conv_kxk.conv.in_channels, + out_channels=self.conv_kxk.conv.out_channels, + kernel_size=self.conv_kxk.conv.kernel_size, + stride=self.conv_kxk.conv.stride, + padding=self.conv_kxk.conv.padding, + dilation=self.conv_kxk.conv.dilation, + groups=self.conv_kxk.conv.groups, + bias=True, + ) + self.reparam_conv.weight.data = kernel + self.reparam_conv.bias.data = bias + + # Delete un-used branches + for name, para in self.named_parameters(): + if 'reparam_conv' in name: + continue + para.detach_() + self.__delattr__('conv_kxk') + self.__delattr__('conv_1x1') + self.__delattr__('identity') + self.__delattr__('drop_path') + + def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: + """ Method to obtain re-parameterized kernel and bias. + Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 + """ + # get weights and bias of scale branch + kernel_1x1 = 0 + bias_1x1 = 0 + if self.conv_1x1 is not None: + kernel_1x1, bias_1x1 = self._fuse_bn_tensor(self.conv_1x1) + # Pad scale branch kernel to match conv branch kernel size. + pad = self.conv_kxk.conv.kernel_size[0] // 2 + kernel_1x1 = torch.nn.functional.pad(kernel_1x1, [pad, pad, pad, pad]) + + # get weights and bias of skip branch + kernel_identity = 0 + bias_identity = 0 + if self.identity is not None: + kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) + + # get weights and bias of conv branches + kernel_conv, bias_conv = self._fuse_bn_tensor(self.conv_kxk) + + kernel_final = kernel_conv + kernel_1x1 + kernel_identity + bias_final = bias_conv + bias_1x1 + bias_identity + return kernel_final, bias_final + + def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: + """ Method to fuse batchnorm layer with preceeding conv layer. + Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 + """ + if isinstance(branch, ConvNormAct): + kernel = branch.conv.weight + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + assert isinstance(branch, nn.BatchNorm2d) + if not hasattr(self, 'id_tensor'): + in_chs = self.conv_kxk.conv.in_channels + input_dim = in_chs // self.groups + kernel_size = self.conv_kxk.conv.kernel_size + kernel_value = torch.zeros_like(self.conv_kxk.conv.weight) + for i in range(in_chs): + kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 + self.id_tensor = kernel_value + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + +class MobileOneBlock(nn.Module): + """ MobileOne building block. 
+ + This block has a multi-branched architecture at train-time + and plain-CNN style architecture at inference time + For more details, please refer to our paper: + `An Improved One millisecond Mobile Backbone` - + https://arxiv.org/pdf/2206.04040.pdf + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + bottle_ratio: float = 1.0, # unused + group_size: Optional[int] = None, + downsample: str = '', # unused + inference_mode: bool = False, + num_conv_branches: int = 1, + layers: LayerFn = None, + drop_block: Callable = None, + drop_path_rate: float = 0., + ) -> None: + """ Construct a MobileOneBlock module. + """ + super(MobileOneBlock, self).__init__() + self.num_conv_branches = num_conv_branches + self.groups = groups = num_groups(group_size, in_chs) + layers = layers or LayerFn() + + if inference_mode: + self.reparam_conv = nn.Conv2d( + in_channels=in_chs, + out_channels=out_chs, + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + groups=groups, + bias=True) + else: + self.reparam_conv = None + + # Re-parameterizable skip connection + use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] + self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None + + # Re-parameterizable conv branches + convs = [] + for _ in range(self.num_conv_branches): + convs.append(layers.conv_norm_act( + in_chs, out_chs, kernel_size=kernel_size, + stride=stride, groups=groups, apply_act=False)) + self.conv_kxk = nn.ModuleList(convs) + + # Re-parameterizable scale branch + self.conv_scale = None + if kernel_size > 1: + self.conv_scale = layers.conv_norm_act( + in_chs, out_chs, kernel_size=1, + stride=stride, groups=groups, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() + + self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) + self.act = layers.act(inplace=True) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ Apply forward pass. """ + # Inference mode forward pass. + if self.reparam_conv is not None: + return self.act(self.attn(self.reparam_conv(x))) + + # Multi-branched train-time forward pass. + # Skip branch output + identity_out = 0 + if self.identity is not None: + identity_out = self.identity(x) + + # Scale branch output + scale_out = 0 + if self.conv_scale is not None: + scale_out = self.conv_scale(x) + + # Other branches + out = scale_out + for ck in self.conv_kxk: + out += ck(x) + out = self.drop_path(out) + out += identity_out + + return self.act(self.attn(out)) + + def reparameterize(self): + """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - + https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched + architecture used at training time to obtain a plain CNN-like structure + for inference. 
+ """ + if self.reparam_conv is not None: + return + + kernel, bias = self._get_kernel_bias() + self.reparam_conv = nn.Conv2d( + in_channels=self.conv_kxk[0].conv.in_channels, + out_channels=self.conv_kxk[0].conv.out_channels, + kernel_size=self.conv_kxk[0].conv.kernel_size, + stride=self.conv_kxk[0].conv.stride, + padding=self.conv_kxk[0].conv.padding, + dilation=self.conv_kxk[0].conv.dilation, + groups=self.conv_kxk[0].conv.groups, + bias=True) + self.reparam_conv.weight.data = kernel + self.reparam_conv.bias.data = bias + + # Delete un-used branches + for name, para in self.named_parameters(): + if 'reparam_conv' in name: + continue + para.detach_() + self.__delattr__('conv_kxk') + self.__delattr__('conv_scale') + self.__delattr__('identity') + self.__delattr__('drop_path') + + def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: + """ Method to obtain re-parameterized kernel and bias. + Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 + """ + # get weights and bias of scale branch + kernel_scale = 0 + bias_scale = 0 + if self.conv_scale is not None: + kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale) + # Pad scale branch kernel to match conv branch kernel size. + pad = self.conv_kxk[0].conv.kernel_size[0] // 2 + kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) + + # get weights and bias of skip branch + kernel_identity = 0 + bias_identity = 0 + if self.identity is not None: + kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) + + # get weights and bias of conv branches + kernel_conv = 0 + bias_conv = 0 + for ix in range(self.num_conv_branches): + _kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix]) + kernel_conv += _kernel + bias_conv += _bias + + kernel_final = kernel_conv + kernel_scale + kernel_identity + bias_final = bias_conv + bias_scale + bias_identity + return kernel_final, bias_final + + def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: + """ Method to fuse batchnorm layer with preceeding conv layer. 
+ Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 + """ + if isinstance(branch, ConvNormAct): + kernel = branch.conv.weight + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + assert isinstance(branch, nn.BatchNorm2d) + if not hasattr(self, 'id_tensor'): + in_chs = self.conv_kxk[0].conv.in_channels + input_dim = in_chs // self.groups + kernel_size = self.conv_kxk[0].conv.kernel_size + kernel_value = torch.zeros_like(self.conv_kxk[0].conv.weight) + for i in range(in_chs): + kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 + self.id_tensor = kernel_value + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + +class SelfAttnBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + bottle_ratio: float = 1., + group_size: Optional[int] = None, + downsample: str = 'avg', + extra_conv: bool = False, + linear_out: bool = False, + bottle_in: bool = False, + post_attn_na: bool = True, + feat_size: Optional[Tuple[int, int]] = None, + layers: LayerFn = None, + drop_block: Callable = None, + drop_path_rate: float = 0., + ): + super(SelfAttnBlock, self).__init__() + assert layers is not None + mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs, out_chs, + stride=stride, dilation=dilation, apply_act=False, layers=layers, + ) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + if extra_conv: + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, + stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, + ) + stride = 1 # striding done via conv if enabled + else: + self.conv2_kxk = nn.Identity() + opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) + # FIXME need to dilate self attn to have dilated network support, moop moop + self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) + self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: + nn.init.zeros_(self.conv3_1x1.bn.weight) + if hasattr(self.self_attn, 'reset_parameters'): + self.self_attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.self_attn(x) + x = self.post_attn(x) + x = self.conv3_1x1(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +_block_registry = dict( + basic=BasicBlock, + bottle=BottleneckBlock, + dark=DarkBlock, + edge=EdgeBlock, + rep=RepVggBlock, + one=MobileOneBlock, + self_attn=SelfAttnBlock, +) + + +def register_block(block_type:str, block_fn: nn.Module): + _block_registry[block_type] = block_fn + + +def create_block(block: Union[str, nn.Module], **kwargs): + if isinstance(block, (nn.Module, partial)): + return block(**kwargs) + assert block in _block_registry, f'Unknown block type ({block}' + return _block_registry[block](**kwargs) + + +class Stem(nn.Sequential): + + def __init__( + self, + in_chs: int, + out_chs: Union[int, List[int], Tuple[int, ...]], + kernel_size: int = 3, + stride: int = 4, + pool: str = 'maxpool', + num_rep: int = 3, + num_act: Optional[int] = None, + chs_decay: float = 0.5, + layers: LayerFn = None, + ): + super().__init__() + assert stride in (2, 4) + layers = layers or LayerFn() + + if isinstance(out_chs, (list, tuple)): + num_rep = len(out_chs) + stem_chs = out_chs + else: + stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] + + self.stride = stride + self.feature_info = [] # track intermediate features + prev_feat = '' + stem_strides = [2] + [1] * (num_rep - 1) + if stride == 4 and not pool: + # set last conv in stack to be strided if stride == 4 and no pooling layer + stem_strides[-1] = 2 + + num_act = num_rep if num_act is None else num_act + # if num_act < num_rep, first convs in stack won't have bn + act + stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act + prev_chs = in_chs + curr_stride = 1 + last_feat_idx = -1 + for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): + layer_fn = layers.conv_norm_act if na else create_conv2d + conv_name = f'conv{i + 1}' + if i > 0 and s > 1: + last_feat_idx = i - 1 + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) + self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) + prev_chs = ch + curr_stride *= s + prev_feat = conv_name + + if pool: + pool = pool.lower() + assert pool in ('max', 'maxpool', 'avg', 'avgpool', 'max2', 'avg2') + last_feat_idx = i + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) + if pool == 'max2': + self.add_module('pool', nn.MaxPool2d(2)) + elif pool == 'avg2': + self.add_module('pool', nn.AvgPool2d(2)) + elif 'max' in pool: + self.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + elif 'avg' in pool: + self.add_module('pool', nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False)) + curr_stride *= 2 + prev_feat = 'pool' + + self.last_feat_idx = last_feat_idx if last_feat_idx >= 0 else None + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) + assert curr_stride == stride + + def 
forward_intermediates(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + intermediate: Optional[torch.Tensor] = None + for i, m in enumerate(self): + x = m(x) + if self.last_feat_idx is not None and i == self.last_feat_idx: + intermediate = x + return x, intermediate + + +def create_byob_stem( + in_chs: int, + out_chs: int, + stem_type: str = '', + pool_type: str = '', + feat_prefix: str = 'stem', + layers: LayerFn = None, +): + layers = layers or LayerFn() + assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', 'one', '7x7', '3x3') + if 'quad' in stem_type: + # based on NFNet stem, stack of 4 3x3 convs + num_act = 2 if 'quad2' in stem_type else None + stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) + elif 'tiered' in stem_type: + # 3x3 stack of 3 convs as in my ResNet-T + stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) + elif 'deep' in stem_type: + # 3x3 stack of 3 convs as in ResNet-D + stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) + elif 'rep' in stem_type: + stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) + elif 'one' in stem_type: + stem = MobileOneBlock(in_chs, out_chs, kernel_size=3, stride=2, layers=layers) + elif '7x7' in stem_type: + # 7x7 stem conv as in ResNet + if pool_type: + stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) + else: + if isinstance(out_chs, (tuple, list)): + stem = Stem(in_chs, out_chs, 3, pool=pool_type, layers=layers) + else: + # 3x3 stem conv as in RegNet is the default + if pool_type: + stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) + + if isinstance(stem, Stem): + feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] + else: + feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix, stage=0)] + return stem, feature_info + + +def reduce_feat_size(feat_size, stride=2): + return None if feat_size is None else tuple([s // stride for s in feat_size]) + + +def override_kwargs(block_kwargs, model_kwargs): + """ Override model level attn/self-attn/block kwargs w/ block level + + NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs + for the block if set to anything that isn't None. + + i.e. 
an empty block_kwargs dict will remove kwargs set at model level for that block + """ + out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs + return out_kwargs or {} # make sure None isn't returned + + +def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ): + layer_fns = block_kwargs['layers'] + + # override attn layer / args with block local config + attn_set = block_cfg.attn_layer is not None + if attn_set or block_cfg.attn_kwargs is not None: + # override attn layer config + if attn_set and not block_cfg.attn_layer: + # empty string for attn_layer type will disable attn for this block + attn_layer = None + else: + attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) + attn_layer = block_cfg.attn_layer or model_cfg.attn_layer + attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None + layer_fns = replace(layer_fns, attn=attn_layer) + + # override self-attn layer / args with block local cfg + self_attn_set = block_cfg.self_attn_layer is not None + if self_attn_set or block_cfg.self_attn_kwargs is not None: + # override attn layer config + if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == '' + # empty string for self_attn_layer type will disable attn for this block + self_attn_layer = None + else: + self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) + self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer + self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \ + if self_attn_layer is not None else None + layer_fns = replace(layer_fns, self_attn=self_attn_layer) + + block_kwargs['layers'] = layer_fns + + # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set + block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs)) + + +def create_byob_stages( + cfg: ByoModelCfg, + drop_path_rate: float, + output_stride: int, + stem_feat: Dict[str, Any], + feat_size: Optional[int] = None, + layers: Optional[LayerFn] = None, + block_kwargs_fn: Optional[Callable] = update_block_kwargs, +): + + layers = layers or LayerFn() + feature_info = [] + block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] + depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + dilation = 1 + net_stride = stem_feat['reduction'] + prev_chs = stem_feat['num_chs'] + prev_feat = stem_feat + stages = [] + for stage_idx, stage_block_cfgs in enumerate(block_cfgs): + stride = stage_block_cfgs[0].s + if stride != 1 and prev_feat: + feature_info.append(prev_feat) + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx, block_cfg in enumerate(stage_block_cfgs): + out_chs = make_divisible(block_cfg.c * cfg.width_factor) + group_size = block_cfg.gs + if isinstance(group_size, Callable): + group_size = group_size(out_chs, block_idx) + block_kwargs = dict( # Blocks used in this model must accept these arguments + in_chs=prev_chs, + out_chs=out_chs, + stride=stride if block_idx == 0 else 1, + dilation=(first_dilation, dilation), + group_size=group_size, + bottle_ratio=block_cfg.br, + downsample=cfg.downsample, + drop_path_rate=dpr[stage_idx][block_idx], + layers=layers, + ) + if block_cfg.type in ('self_attn',): + # add 
feat_size arg for blocks that support/need it + block_kwargs['feat_size'] = feat_size + block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) + blocks += [create_block(block_cfg.type, **block_kwargs)] + first_dilation = dilation + prev_chs = out_chs + if stride > 1 and block_idx == 0: + feat_size = reduce_feat_size(feat_size, stride) + + stages += [nn.Sequential(*blocks)] + prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}', stage=stage_idx + 1) + + feature_info.append(prev_feat) + return nn.Sequential(*stages), feature_info, feat_size + + +def get_layer_fns(cfg: ByoModelCfg, allow_aa: bool = True): + act = get_act_layer(cfg.act_layer) + norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act) + if cfg.aa_layer and allow_aa: + conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act, aa_layer=cfg.aa_layer) + else: + conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act) + attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None + layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) + return layer_fn + + +class ByobNet(nn.Module): + """ 'Bring-your-own-blocks' Net + + A flexible network backbone that allows building model stem + blocks via + dataclass cfg definition w/ factory functions for module instantiation. + + Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act). + """ + def __init__( + self, + cfg: ByoModelCfg, + num_classes: int = 1000, + in_chans: int = 3, + global_pool: Optional[str] = None, + output_stride: int = 32, + img_size: Optional[Union[int, Tuple[int, int]]] = None, + drop_rate: float = 0., + drop_path_rate: float =0., + zero_init_last: bool = True, + **kwargs, + ): + """ + Args: + cfg: Model architecture configuration. + num_classes: Number of classifier classes. + in_chans: Number of input channels. + global_pool: Global pooling type. + output_stride: Output stride of network, one of (8, 16, 32). + img_size: Image size for fixed image size models (i.e. self-attn). + drop_rate: Classifier dropout rate. + drop_path_rate: Stochastic depth drop-path rate. + zero_init_last: Zero-init last weight of residual path. + **kwargs: Extra kwargs overlayed onto cfg. 
+ """ + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg + stem_layers = get_layer_fns(cfg, allow_aa=False) # keep aa off for stem-layers + stage_layers = get_layer_fns(cfg) + if cfg.fixed_input_size: + assert img_size is not None, 'img_size argument is required for fixed input size model' + feat_size = to_2tuple(img_size) if img_size is not None else None + + self.feature_info = [] + if isinstance(cfg.stem_chs, (list, tuple)): + stem_chs = [int(round(c * cfg.width_factor)) for c in cfg.stem_chs] + else: + stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) + self.stem, stem_feat = create_byob_stem( + in_chs=in_chans, + out_chs=stem_chs, + stem_type=cfg.stem_type, + pool_type=cfg.stem_pool, + layers=stem_layers, + ) + self.feature_info.extend(stem_feat[:-1]) + feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) + + self.stages, stage_feat, feat_size = create_byob_stages( + cfg, + drop_path_rate, + output_stride, + stem_feat[-1], + layers=stage_layers, + feat_size=feat_size, + ) + self.feature_info.extend(stage_feat[:-1]) + reduction = stage_feat[-1]['reduction'] + + prev_chs = stage_feat[-1]['num_chs'] + if cfg.num_features: + self.num_features = int(round(cfg.width_factor * cfg.num_features)) + self.final_conv = stage_layers.conv_norm_act(prev_chs, self.num_features, 1) + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.feature_info += [ + dict(num_chs=self.num_features, reduction=reduction, module='final_conv', stage=len(self.stages))] + self.stage_ends = [f['stage'] for f in self.feature_info] + + self.head_hidden_size = self.num_features + assert cfg.head_type in ('', 'classifier', 'mlp', 'attn_abs', 'attn_rot') + if cfg.head_type == 'mlp': + if global_pool is None: + global_pool = 'avg' + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + hidden_size=cfg.head_hidden_size, + pool_type=global_pool, + norm_layer=cfg.norm_layer, + act_layer=cfg.act_layer, + drop_rate=self.drop_rate, + ) + self.head_hidden_size = self.head.hidden_size + elif cfg.head_type == 'attn_abs': + if global_pool is None: + global_pool = 'token' + assert global_pool in ('', 'token') + self.head = AttentionPool2d( + self.num_features, + embed_dim=cfg.head_hidden_size, + out_features=num_classes, + feat_size=feat_size, + pool_type=global_pool, + drop_rate=self.drop_rate, + qkv_separate=True, + ) + self.head_hidden_size = self.head.embed_dim + elif cfg.head_type =='attn_rot': + if global_pool is None: + global_pool = 'token' + assert global_pool in ('', 'token') + self.head = RotAttentionPool2d( + self.num_features, + embed_dim=cfg.head_hidden_size, + out_features=num_classes, + ref_feat_size=feat_size, + pool_type=global_pool, + drop_rate=self.drop_rate, + qkv_separate=True, + ) + self.head_hidden_size = self.head.embed_dim + else: + if global_pool is None: + global_pool = 'avg' + assert cfg.head_hidden_size is None + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + ) + self.global_pool = global_pool + + # init weights + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=[ + (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), + (r'^final_conv', (99999,)) + ] + ) + return matcher + + 
@torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + exclude_final_conv: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + exclude_final_conv: Exclude final_conv from last intermediate + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + take_indices = [self.stage_ends[i] for i in take_indices] + max_index = self.stage_ends[max_index] + # forward pass + feat_idx = 0 # stem is index 0 + if hasattr(self.stem, 'forward_intermediates'): + # returns last intermediate features in stem (before final stride in stride > 2 stems) + x, x_inter = self.stem.forward_intermediates(x) + else: + x, x_inter = self.stem(x), None + if feat_idx in take_indices: + intermediates.append(x if x_inter is None else x_inter) + last_idx = self.stage_ends[-1] + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.stages + else: + stages = self.stages[:max_index] + for stage in stages: + feat_idx += 1 + x = stage(x) + if not exclude_final_conv and feat_idx == last_idx: + # default feature_info for this model uses final_conv as the last feature output (if present) + x = self.final_conv(x) + if feat_idx in take_indices: + intermediates.append(x) + + if intermediates_only: + return intermediates + + if exclude_final_conv and feat_idx == last_idx: + x = self.final_conv(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
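+        Args:
+            indices: Take last n blocks if int, all if None, select matching indices if sequence
+            prune_norm: Not used by this model
+            prune_head: Also reset the classifier head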
+ """ + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + max_index = self.stage_ends[max_index] + self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0 + if max_index < self.stage_ends[-1]: + self.final_conv = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + x = self.final_conv(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name='', zero_init_last=False): + if isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights(zero_init_last=zero_init_last) + + +model_cfgs = dict( + gernet_l=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_m=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_s=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), + ), + stem_chs=13, + stem_pool=None, + num_features=1920, + ), + + repvgg_a0=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(0.75, 0.75, 0.75, 2.5)), + stem_type='rep', + stem_chs=48, + ), + repvgg_a1=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1, 1, 1, 2.5)), + stem_type='rep', + stem_chs=64, + ), + repvgg_a2=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b0=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2g4=ByoModelCfg( 
+ blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_d2se=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(8, 14, 24, 1), wf=(2.5, 2.5, 2.5, 5.)), + stem_type='rep', + stem_chs=64, + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.0625, rd_divisor=1), + ), + + # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks + # DW convs in last block, 2048 pre-FC, silu act + resnet51q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad2', + stem_pool=None, + num_features=2048, + act_layer='silu', + ), + + # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks + # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act + resnet61q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad', + stem_pool=None, + num_features=2048, + act_layer='silu', + block_kwargs=dict(extra_conv=True), + ), + + # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, + # and a tiered stem w/ maxpool + resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + ), + gcresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='gca', + ), + seresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='se', + ), + eca_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + ), + bat_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + 
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='bat', + attn_kwargs=dict(block_size=8) + ), + + # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool + resnet32ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=0, + act_layer='silu', + ), + + # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool + resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + ), + + # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat + # and a tiered stem w/ no maxpool + gcresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='gca', + ), + seresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='se', + ), + eca_resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='eca', + ), + + gcresnet50t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + attn_layer='gca', + ), + + gcresnext50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='gca', + ), + + # experimental models, closer to a RegNetZ than a ResNet. 
Similar to EfficientNets but w/ groups instead of DW + regnetz_b16=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_c16=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d32=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d8=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_e8=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=2048, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + + # experimental EvoNorm configs + regnetz_b16_evos=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + norm_layer=partial(EvoNorm2dS0a, group_size=16), + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_c16_evos=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + norm_layer=partial(EvoNorm2dS0a, group_size=16), + attn_layer='se', + 
attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d8_evos=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='deep', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + norm_layer=partial(EvoNorm2dS0a, group_size=16), + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + + mobileone_s0=ByoModelCfg( + blocks=_mobileone_bcfg(wf=(0.75, 1.0, 1.0, 2.), num_conv_branches=4), + stem_type='one', + stem_chs=48, + ), + mobileone_s1=ByoModelCfg( + blocks=_mobileone_bcfg(wf=(1.5, 1.5, 2.0, 2.5)), + stem_type='one', + stem_chs=64, + ), + mobileone_s2=ByoModelCfg( + blocks=_mobileone_bcfg(wf=(1.5, 2.0, 2.5, 4.0)), + stem_type='one', + stem_chs=64, + ), + mobileone_s3=ByoModelCfg( + blocks=_mobileone_bcfg(wf=(2.0, 2.5, 3.0, 4.0)), + stem_type='one', + stem_chs=64, + ), + mobileone_s4=ByoModelCfg( + blocks=_mobileone_bcfg(wf=(3.0, 3.5, 3.5, 4.0), se_blocks=(0, 0, 5, 1)), + stem_type='one', + stem_chs=64, + ), + + resnet50_clip=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=(32, 32, 64), + stem_type='', + stem_pool='avg2', + downsample='avg', + aa_layer='avg', + head_type='attn_abs', + ), + resnet101_clip=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=23, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=(32, 32, 64), + stem_type='', + stem_pool='avg2', + downsample='avg', + aa_layer='avg', + head_type='attn_abs', + ), + resnet50x4_clip=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=4, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=10, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=2048, s=2, br=0.25), + ), + width_factor=1.25, + stem_chs=(32, 32, 64), + stem_type='', + stem_pool='avg2', + downsample='avg', + aa_layer='avg', + head_type='attn_abs', + ), + resnet50x16_clip=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=6, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=8, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=18, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=8, c=2048, s=2, br=0.25), + ), + width_factor=1.5, + stem_chs=(32, 32, 64), + stem_type='', + stem_pool='avg2', + downsample='avg', + aa_layer='avg', + head_type='attn_abs', + ), + resnet50x64_clip=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=15, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=36, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=10, c=2048, s=2, br=0.25), + ), + width_factor=2.0, + stem_chs=(32, 32, 64), + stem_type='', + stem_pool='avg2', + downsample='avg', + aa_layer='avg', + head_type='attn_abs', + ), + + resnet50_mlp=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, 
s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=(32, 32, 64), + stem_type='', + stem_pool='avg2', + downsample='avg', + aa_layer='avg', + head_hidden_size=1024, + head_type='mlp', + ), + + test_byobnet=ByoModelCfg( + blocks=( + ByoBlockCfg(type='edge', d=1, c=32, s=2, gs=0, br=0.5), + ByoBlockCfg(type='dark', d=1, c=64, s=2, gs=0, br=0.5), + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=1, c=256, s=2, gs=64, br=0.25), + ), + stem_chs=24, + downsample='avg', + stem_pool='', + act_layer='relu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + ), +) +for k in ('resnet50_clip', 'resnet101_clip', 'resnet50x4_clip', 'resnet50x16_clip', 'resnet50x64_clip'): + model_cfgs[k + '_gap'] = replace(model_cfgs[k], head_type='classifier') + + +def _convert_openai_clip( + state_dict: Dict[str, torch.Tensor], + model: ByobNet, + prefix: str = 'visual.', +) -> Dict[str, torch.Tensor]: + model_has_attn_pool = isinstance(model.head, (RotAttentionPool2d, AttentionPool2d)) + import re + + def _stage_sub(m): + stage_idx = int(m.group(1)) - 1 + layer_idx, layer_type, layer_id = int(m.group(2)), m.group(3), int(m.group(4)) + prefix_str = f'stages.{stage_idx}.{layer_idx}.' + id_map = {1: 'conv1_1x1.', 2: 'conv2_kxk.', 3: 'conv3_1x1.'} + suffix_str = id_map[layer_id] + layer_type + return prefix_str + suffix_str + + def _down_sub(m): + stage_idx = int(m.group(1)) - 1 + layer_idx, layer_id = int(m.group(2)), int(m.group(3)) + return f'stages.{stage_idx}.{layer_idx}.shortcut.' + ('conv.conv' if layer_id == 0 else 'conv.bn') + + out_dict = {} + for k, v in state_dict.items(): + if not k.startswith(prefix): + continue + k = re.sub(rf'{prefix}conv([0-9])', r'stem.conv\1.conv', k) + k = re.sub(rf'{prefix}bn([0-9])', r'stem.conv\1.bn', k) + k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.([a-z]+)([0-9])', _stage_sub, k) + k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.downsample\.([0-9])', _down_sub, k) + if k.startswith(f'{prefix}attnpool'): + if not model_has_attn_pool: + continue + k = k.replace(prefix + 'attnpool', 'head') #'attn_pool') + k = k.replace('positional_embedding', 'pos_embed') + k = k.replace('q_proj', 'q') + k = k.replace('k_proj', 'k') + k = k.replace('v_proj', 'v') + k = k.replace('c_proj', 'proj') + out_dict[k] = v + + return out_dict + + +def checkpoint_filter_fn( + state_dict: Dict[str, torch.Tensor], + model: ByobNet +): + if 'visual.conv1.weight' in state_dict: + state_dict = _convert_openai_clip(state_dict, model) + return state_dict + + +def _create_byobnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +def _cfgr(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # GPU-Efficient (ResNet) weights + 'gernet_s.idstcv_in1k': 
_cfg(hf_hub_id='timm/'), + 'gernet_m.idstcv_in1k': _cfg(hf_hub_id='timm/'), + 'gernet_l.idstcv_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), + + # RepVGG weights + 'repvgg_a0.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_a1.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_a2.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_b0.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_b1.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_b1g4.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_b2.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_b2g4.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_b3.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_b3g4.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), + 'repvgg_d2se.rvgg_in1k': _cfg( + hf_hub_id='timm/', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, + ), + + # experimental ResNet configs + 'resnet51q.ra2_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', + first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnet61q.ra2_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + # ResNeXt-26 models with different attention in Bottleneck blocks + 'resnext26ts.ra2_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'seresnext26ts.ch_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'gcresnext26ts.ch_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'eca_resnext26ts.ch_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'bat_resnext26ts.ch_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', + min_input_size=(3, 256, 256)), + + # ResNet-32 / 33 models with different attention in Bottleneck blocks + 'resnet32ts.ra2_in1k': _cfgr( + 
hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnet33ts.ra2_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'gcresnet33ts.ra2_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'seresnet33ts.ra2_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'eca_resnet33ts.ra2_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'gcresnet50t.ra2_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'gcresnext50ts.ch_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + # custom `timm` specific RegNetZ inspired models w/ different sizing from paper + 'regnetz_b16.ra3_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth', + first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.94, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'regnetz_c16.ra3_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth', + first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), + 'regnetz_d32.ra3_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), + 'regnetz_d8.ra3_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), + 'regnetz_e8.ra3_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), + + 'regnetz_b16_evos.untrained': _cfgr( + first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.95, test_input_size=(3, 288, 288)), + 'regnetz_c16_evos.ch_in1k': _cfgr( + hf_hub_id='timm/', + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth', + first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'regnetz_d8_evos.ch_in1k': _cfgr( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), + + 'mobileone_s0.apple_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.875, + first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), + ), + 'mobileone_s1.apple_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.9, + first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), + ), + 'mobileone_s2.apple_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.9, + first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), + ), + 'mobileone_s3.apple_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.9, + first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), + ), + 'mobileone_s4.apple_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.9, + first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), + ), + + # original attention pool head variants + 'resnet50_clip.openai': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), + classifier='head.proj', + ), + 'resnet101_clip.openai': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), + classifier='head.proj', + ), + 'resnet50x4_clip.openai': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=640, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 288, 288), pool_size=(9, 9), + classifier='head.proj', + ), + 'resnet50x16_clip.openai': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=768, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 384, 384), pool_size=(12, 12), + classifier='head.proj', + ), + 'resnet50x64_clip.openai': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 448, 448), pool_size=(14, 14), + classifier='head.proj', + ), + 'resnet50_clip.cc12m': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), + classifier='head.proj', + ), + 'resnet50_clip.yfcc15m': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), + classifier='head.proj', + ), + 'resnet101_clip.yfcc15m': _cfgr( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), + classifier='head.proj', + ), + + # avg-pool w/ optional standard classifier head variants + 'resnet50_clip_gap.openai': _cfgr( + 
hf_hub_id='timm/resnet50_clip.openai', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 224, 224), pool_size=(7, 7), + ), + 'resnet101_clip_gap.openai': _cfgr( + hf_hub_id='timm/resnet101_clip.openai', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 224, 224), pool_size=(7, 7), + ), + 'resnet50x4_clip_gap.openai': _cfgr( + hf_hub_id='timm/resnet50x4_clip.openai', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 288, 288), pool_size=(9, 9), + ), + 'resnet50x16_clip_gap.openai': _cfgr( + hf_hub_id='timm/resnet50x16_clip.openai', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 384, 384), pool_size=(12, 12), + ), + 'resnet50x64_clip_gap.openai': _cfgr( + hf_hub_id='timm/resnet50x64_clip.openai', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 448, 448), pool_size=(14, 14), + ), + 'resnet50_clip_gap.cc12m': _cfgr( + hf_hub_id='timm/resnet50_clip.cc12m', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 224, 224), pool_size=(7, 7), + ), + 'resnet50_clip_gap.yfcc15m': _cfgr( + hf_hub_id='timm/resnet50_clip.yfcc15m', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 224, 224), pool_size=(7, 7), + ), + 'resnet101_clip_gap.yfcc15m': _cfgr( + hf_hub_id='timm/resnet101_clip.yfcc15m', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 224, 224), pool_size=(7, 7), + ), + + 'resnet50_mlp.untrained': _cfgr( + input_size=(3, 256, 256), pool_size=(8, 8), + ), + + 'test_byobnet.r160_in1k': _cfgr( + hf_hub_id='timm/', + first_conv='stem.conv', + input_size=(3, 160, 160), crop_pct=0.95, pool_size=(5, 5), + ), +}) + + +@register_model +def gernet_l(pretrained=False, **kwargs) -> ByobNet: + """ GEResNet-Large (GENet-Large from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_m(pretrained=False, **kwargs) -> ByobNet: + """ GEResNet-Medium (GENet-Normal from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_s(pretrained=False, **kwargs) -> ByobNet: + """ EResNet-Small (GENet-Small from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_a0(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-A0 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_a0', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_a1(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-A1 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_a1', pretrained=pretrained, **kwargs) + + +@register_model +def 
repvgg_a2(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-A2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b0(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-B0 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-B1 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1g4(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-B1g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-B2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2g4(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-B2g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-B3 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3g4(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-B3g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_d2se(pretrained=False, **kwargs) -> ByobNet: + """ RepVGG-D2se + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_d2se', pretrained=pretrained, **kwargs) + + +@register_model +def resnet51q(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) + + +@register_model +def resnet61q(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) + + +@register_model +def resnext26ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext26ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnext26ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnext26ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def bat_resnext26ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet32ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) + + +@register_model 
+def resnet33ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet33ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnet33ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnet33ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet50t(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext50ts(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_b16(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_c16(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d32(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d8(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_e8(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_b16_evos(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_c16_evos(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d8_evos(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs) + + +@register_model +def mobileone_s0(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('mobileone_s0', pretrained=pretrained, **kwargs) + + +@register_model +def mobileone_s1(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('mobileone_s1', pretrained=pretrained, **kwargs) + + +@register_model +def mobileone_s2(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('mobileone_s2', pretrained=pretrained, **kwargs) + + +@register_model +def mobileone_s3(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('mobileone_s3', pretrained=pretrained, **kwargs) + + +@register_model +def mobileone_s4(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50_clip(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50 CLIP image tower + """ + return _create_byobnet('resnet50_clip', pretrained=pretrained, **kwargs) + + +@register_model +def resnet101_clip(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-101 CLIP image tower + """ + return _create_byobnet('resnet101_clip', pretrained=pretrained, **kwargs) + + 
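+# Usage sketch (illustrative only; the variant and tensor shapes below are examples):
+# each model registered in this file is built through the timm factory, e.g.
+#
+#   import torch, timm
+#   m = timm.create_model('resnext26ts', pretrained=False, num_classes=0)
+#   feats = m.forward_features(torch.randn(1, 3, 256, 256))        # (1, C, 8, 8) feature map
+#   last, inter = m.forward_intermediates(torch.randn(1, 3, 256, 256), indices=2)
+#
+# Extra kwargs passed to create_model are overlaid onto the variant's ByoModelCfg.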
+@register_model +def resnet50x4_clip(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50x4 CLIP image tower + """ + return _create_byobnet('resnet50x4_clip', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50x16_clip(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50x16 CLIP image tower + """ + return _create_byobnet('resnet50x16_clip', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50x64_clip(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50x64 CLIP image tower + """ + return _create_byobnet('resnet50x64_clip', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50_clip_gap(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50 CLIP image tower w/ avg pool (no attention pool) + """ + return _create_byobnet('resnet50_clip_gap', pretrained=pretrained, **kwargs) + + +@register_model +def resnet101_clip_gap(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-101 CLIP image tower w/ avg pool (no attention pool) + """ + return _create_byobnet('resnet101_clip_gap', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50x4_clip_gap(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50x4 CLIP image tower w/ avg pool (no attention pool) + """ + return _create_byobnet('resnet50x4_clip_gap', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50x16_clip_gap(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50x16 CLIP image tower w/ avg pool (no attention pool) + """ + return _create_byobnet('resnet50x16_clip_gap', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50x64_clip_gap(pretrained=False, **kwargs) -> ByobNet: + """ OpenAI Modified ResNet-50x64 CLIP image tower w/ avg pool (no attention pool) + """ + return _create_byobnet('resnet50x64_clip_gap', pretrained=pretrained, **kwargs) + + +@register_model +def resnet50_mlp(pretrained=False, **kwargs) -> ByobNet: + """ + """ + return _create_byobnet('resnet50_mlp', pretrained=pretrained, **kwargs) + + +@register_model +def test_byobnet(pretrained=False, **kwargs) -> ByobNet: + """ Minimal test ResNet (BYOB based) model. + """ + return _create_byobnet('test_byobnet', pretrained=pretrained, **kwargs) diff --git a/pytorch-image-models/timm/models/cait.py b/pytorch-image-models/timm/models/cait.py new file mode 100644 index 0000000000000000000000000000000000000000..28e14ec75604fcd1eafac8fdcc616cb55e9bb7ed --- /dev/null +++ b/pytorch-image-models/timm/models/cait.py @@ -0,0 +1,591 @@ +""" Class-Attention in Image Transformers (CaiT) + +Paper: 'Going deeper with Image Transformers' - https://arxiv.org/abs/2103.17239 + +Original code and weights from https://github.com/facebookresearch/deit, copyright below + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. 
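+
+# Architecture note (editorial summary of the code below): CaiT runs `depth` self-attention
+# blocks over patch tokens only (LayerScaleBlock: TalkingHeadAttn + MLP, residuals scaled by
+# learned LayerScale params gamma_1/gamma_2), then appends a class token and runs
+# `depth_token_only` class-attention blocks (LayerScaleBlockClassAttn) in which only the
+# class token is updated while the patch tokens stay fixed.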
+from functools import partial +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, use_fused_attn +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn'] + + +class ClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to do CA + fused_attn: torch.jit.Final[bool] + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.fused_attn: + x_cls = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x_cls = attn @ v + + x_cls = x_cls.transpose(1, 2).reshape(B, 1, C) + x_cls = self.proj(x_cls) + x_cls = self.proj_drop(x_cls) + + return x_cls + + +class LayerScaleBlockClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add CA and LayerScale + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + attn_block=ClassAttn, + mlp_block=Mlp, + init_values=1e-4, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=proj_drop, + ) + self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) + self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x, x_cls): + u = torch.cat((x_cls, x), dim=1) + x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u))) + x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls))) + return x_cls + + +class TalkingHeadAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf) + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + + self.num_heads = num_heads + + head_dim = dim // num_heads + + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim) + + self.proj_l = nn.Linear(num_heads, num_heads) + self.proj_w = nn.Linear(num_heads, num_heads) + + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = q @ k.transpose(-2, -1) + + attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + attn = attn.softmax(dim=-1) + + attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScaleBlock(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add layerScale + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + attn_block=TalkingHeadAttn, + mlp_block=Mlp, + init_values=1e-4, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=proj_drop, + ) + self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) + self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class Cait(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to adapt to our cait models + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='token', + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + pos_drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + block_layers=LayerScaleBlock, + block_layers_token=LayerScaleBlockClassAttn, + patch_layer=PatchEmbed, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + attn_block=TalkingHeadAttn, + mlp_block=Mlp, + init_values=1e-4, + attn_block_token_only=ClassAttn, + mlp_block_token_only=Mlp, + depth_token_only=2, + mlp_ratio_token_only=4.0 + ): + super().__init__() + assert global_pool in ('', 'token', 'avg') + + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim + self.grad_checkpointing = False + + self.patch_embed = patch_layer( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + ) + num_patches = self.patch_embed.num_patches + r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + self.pos_drop = nn.Dropout(p=pos_drop_rate) + + dpr = [drop_path_rate for i in range(depth)] + self.blocks = nn.Sequential(*[block_layers( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + attn_block=attn_block, + mlp_block=mlp_block, + init_values=init_values, + ) for i in range(depth)]) + self.feature_info = [dict(num_chs=embed_dim, reduction=r, module=f'blocks.{i}') for i in range(depth)] + + self.blocks_token_only = nn.ModuleList([block_layers_token( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio_token_only, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + attn_block=attn_block_token_only, + mlp_block=mlp_block_token_only, + init_values=init_values, + ) for _ in range(depth_token_only)]) + + self.norm = norm_layer(embed_dim) + + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + 
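# --- NOTE (editor): illustrative sketch, not part of the patch above. ClassAttn builds its
# queries from the CLS token only, so a class-attention block updates just that token and
# costs O(N) rather than O(N^2) in the number of patch tokens. The module path assumes this
# diff has been applied to a working timm install.
import torch
from timm.models.cait import ClassAttn

attn = ClassAttn(dim=192, num_heads=4)
tokens = torch.randn(2, 1 + 14 * 14, 192)   # [B, 1 + H*W, C], CLS token first
cls_out = attn(tokens)                      # -> [2, 1, 192]: only the CLS token is refreshed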
@torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def group_matcher(self, coarse=False): + def _matcher(name): + if any([name.startswith(n) for n in ('cls_token', 'pos_embed', 'patch_embed')]): + return 0 + elif name.startswith('blocks.'): + return int(name.split('.')[1]) + 1 + elif name.startswith('blocks_token_only.'): + # overlap token only blocks with last blocks + to_offset = len(self.blocks) - len(self.blocks_token_only) + 1 + return int(name.split('.')[1]) + to_offset + elif name.startswith('norm.'): + return len(self.blocks) + else: + return float('inf') + return _matcher + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'token', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' + reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass + B, _, height, width = x.shape + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x) + if i in take_indices: + # normalize intermediates with final norm layer if enabled + intermediates.append(self.norm(x) if norm else x) + + # process intermediates + if reshape: + # reshape to BCHW output format + H, W = self.patch_embed.dynamic_feat_size((height, width)) + intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] + + if intermediates_only: + return intermediates + + # NOTE not supporting return of class tokens + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + for i, blk in enumerate(self.blocks_token_only): + cls_tokens = blk(x, cls_tokens) + x = torch.cat((cls_tokens, x), dim=1) + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
+ """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.blocks_token_only = nn.ModuleList() # prune token blocks with head + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + for i, blk in enumerate(self.blocks_token_only): + cls_tokens = blk(x, cls_tokens) + x = torch.cat((cls_tokens, x), dim=1) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model=None): + if 'model' in state_dict: + state_dict = state_dict['model'] + checkpoint_no_module = {} + for k, v in state_dict.items(): + checkpoint_no_module[k.replace('module.', '')] = v + return checkpoint_no_module + + +def _create_cait(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + model = build_model_with_cfg( + Cait, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'cait_xxs24_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth', + input_size=(3, 224, 224), + ), + 'cait_xxs24_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth', + ), + 'cait_xxs36_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth', + input_size=(3, 224, 224), + ), + 'cait_xxs36_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth', + ), + 'cait_xs24_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth', + ), + 'cait_s24_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/S24_224.pth', + input_size=(3, 224, 224), + ), + 'cait_s24_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/S24_384.pth', + ), + 'cait_s36_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/S36_384.pth', + ), + 'cait_m36_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/M36_384.pth', + ), + 'cait_m48_448.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/M48_448.pth', + input_size=(3, 448, 448), + ), +}) + + +@register_model +def cait_xxs24_224(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-5) 
+ model = _create_cait('cait_xxs24_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_xxs24_384(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-5) + model = _create_cait('cait_xxs24_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_xxs36_224(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5) + model = _create_cait('cait_xxs36_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_xxs36_384(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5) + model = _create_cait('cait_xxs36_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_xs24_384(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_values=1e-5) + model = _create_cait('cait_xs24_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_s24_224(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5) + model = _create_cait('cait_s24_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_s24_384(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5) + model = _create_cait('cait_s24_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_s36_384(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_values=1e-6) + model = _create_cait('cait_s36_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_m36_384(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_values=1e-6) + model = _create_cait('cait_m36_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def cait_m48_448(pretrained=False, **kwargs) -> Cait: + model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_values=1e-6) + model = _create_cait('cait_m48_448', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/coat.py b/pytorch-image-models/timm/models/coat.py new file mode 100644 index 0000000000000000000000000000000000000000..906ecb90836bbdd860ef26a961348f6fd10006e1 --- /dev/null +++ b/pytorch-image-models/timm/models/coat.py @@ -0,0 +1,803 @@ +""" +CoaT architecture. 
+ +Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399 + +Official CoaT code at: https://github.com/mlpc-ucsd/CoaT + +Modified from timm/models/vision_transformer.py +""" +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, LayerNorm +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['CoaT'] + + +class ConvRelPosEnc(nn.Module): + """ Convolutional relative position encoding. """ + def __init__(self, head_chs, num_heads, window): + """ + Initialization. + Ch: Channels per head. + h: Number of heads. + window: Window size(s) in convolutional relative positional encoding. It can have two forms: + 1. An integer of window size, which assigns all attention heads with the same window s + size in ConvRelPosEnc. + 2. A dict mapping window size to #attention head splits ( + e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}) + It will apply different window size to the attention head splits. + """ + super().__init__() + + if isinstance(window, int): + # Set the same window size for all attention heads. + window = {window: num_heads} + self.window = window + elif isinstance(window, dict): + self.window = window + else: + raise ValueError() + + self.conv_list = nn.ModuleList() + self.head_splits = [] + for cur_window, cur_head_split in window.items(): + dilation = 1 + # Determine padding size. + # Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338 + padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2 + cur_conv = nn.Conv2d( + cur_head_split * head_chs, + cur_head_split * head_chs, + kernel_size=(cur_window, cur_window), + padding=(padding_size, padding_size), + dilation=(dilation, dilation), + groups=cur_head_split * head_chs, + ) + self.conv_list.append(cur_conv) + self.head_splits.append(cur_head_split) + self.channel_splits = [x * head_chs for x in self.head_splits] + + def forward(self, q, v, size: Tuple[int, int]): + B, num_heads, N, C = q.shape + H, W = size + _assert(N == 1 + H * W, '') + + # Convolutional relative position encoding. + q_img = q[:, :, 1:, :] # [B, h, H*W, Ch] + v_img = v[:, :, 1:, :] # [B, h, H*W, Ch] + + v_img = v_img.transpose(-1, -2).reshape(B, num_heads * C, H, W) + v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels + conv_v_img_list = [] + for i, conv in enumerate(self.conv_list): + conv_v_img_list.append(conv(v_img_list[i])) + conv_v_img = torch.cat(conv_v_img_list, dim=1) + conv_v_img = conv_v_img.reshape(B, num_heads, C, H * W).transpose(-1, -2) + + EV_hat = q_img * conv_v_img + EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch]. + return EV_hat + + +class FactorAttnConvRelPosEnc(nn.Module): + """ Factorized attention with convolutional relative position encoding class. """ + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + shared_crpe=None, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used. 
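# --- NOTE (editor): illustrative sketch of the ConvRelPosEnc window argument documented
# above. A dict maps window size -> number of attention-head splits, so different heads get
# different depthwise-conv receptive fields; {3: 2, 5: 3, 7: 3} is the default used by the
# CoaT models below. The module path assumes this diff has been applied.
import torch
from timm.models.coat import ConvRelPosEnc

crpe = ConvRelPosEnc(head_chs=8, num_heads=8, window={3: 2, 5: 3, 7: 3})
q = torch.randn(2, 8, 1 + 14 * 14, 8)       # [B, heads, 1 + H*W, Ch], CLS token included
v = torch.randn(2, 8, 1 + 14 * 14, 8)
out = crpe(q, v, size=(14, 14))             # -> same shape; the CLS row is zero-padded back in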
+ self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + # Shared convolutional relative position encoding. + self.crpe = shared_crpe + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + + # Generate Q, K, V. + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # [B, h, N, Ch] + + # Factorized attention. + k_softmax = k.softmax(dim=2) + factor_att = k_softmax.transpose(-1, -2) @ v + factor_att = q @ factor_att + + # Convolutional relative position encoding. + crpe = self.crpe(q, v, size=size) # [B, h, N, Ch] + + # Merge and reshape. + x = self.scale * factor_att + crpe + x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C] + + # Output projection. + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class ConvPosEnc(nn.Module): + """ Convolutional Position Encoding. + Note: This module is similar to the conditional position encoding in CPVT. + """ + def __init__(self, dim, k=3): + super(ConvPosEnc, self).__init__() + self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim) + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + H, W = size + _assert(N == 1 + H * W, '') + + # Extract CLS token and image tokens. + cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C] + + # Depthwise convolution. + feat = img_tokens.transpose(1, 2).view(B, C, H, W) + x = self.proj(feat) + feat + x = x.flatten(2).transpose(1, 2) + + # Combine with CLS token. + x = torch.cat((cls_token, x), dim=1) + + return x + + +class SerialBlock(nn.Module): + """ Serial block class. + Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. """ + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + shared_cpe=None, + shared_crpe=None, + ): + super().__init__() + + # Conv-Attention. + self.cpe = shared_cpe + + self.norm1 = norm_layer(dim) + self.factoratt_crpe = FactorAttnConvRelPosEnc( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + shared_crpe=shared_crpe, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=proj_drop, + ) + + def forward(self, x, size: Tuple[int, int]): + # Conv-Attention. + x = self.cpe(x, size) + cur = self.norm1(x) + cur = self.factoratt_crpe(cur, size) + x = x + self.drop_path(cur) + + # MLP. + cur = self.norm2(x) + cur = self.mlp(cur) + x = x + self.drop_path(cur) + + return x + + +class ParallelBlock(nn.Module): + """ Parallel block class. """ + def __init__( + self, + dims, + num_heads, + mlp_ratios=[], + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + shared_crpes=None, + ): + super().__init__() + + # Conv-Attention. 
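# --- NOTE (editor): minimal sketch of the factorized attention computed in
# FactorAttnConvRelPosEnc.forward() above: keys are softmax-normalised over the token axis,
# k^T v is aggregated once into a [Ch, Ch] summary, and queries are applied afterwards, so
# the cost is linear in the sequence length N instead of quadratic.
import torch

B, h, N, Ch = 2, 8, 1 + 14 * 14, 8
q, k, v = (torch.randn(B, h, N, Ch) for _ in range(3))
scale = Ch ** -0.5
factor_att = q @ (k.softmax(dim=2).transpose(-1, -2) @ v)   # [B, h, N, Ch]
out = scale * factor_att         # the conv relative-position term is added on top in the real module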
+ self.norm12 = norm_layer(dims[1]) + self.norm13 = norm_layer(dims[2]) + self.norm14 = norm_layer(dims[3]) + self.factoratt_crpe2 = FactorAttnConvRelPosEnc( + dims[1], + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + shared_crpe=shared_crpes[1], + ) + self.factoratt_crpe3 = FactorAttnConvRelPosEnc( + dims[2], + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + shared_crpe=shared_crpes[2], + ) + self.factoratt_crpe4 = FactorAttnConvRelPosEnc( + dims[3], + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + shared_crpe=shared_crpes[3], + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm22 = norm_layer(dims[1]) + self.norm23 = norm_layer(dims[2]) + self.norm24 = norm_layer(dims[3]) + # In parallel block, we assume dimensions are the same and share the linear transformation. + assert dims[1] == dims[2] == dims[3] + assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3] + mlp_hidden_dim = int(dims[1] * mlp_ratios[1]) + self.mlp2 = self.mlp3 = self.mlp4 = Mlp( + in_features=dims[1], + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=proj_drop, + ) + + def upsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map up-sampling. """ + return self.interpolate(x, scale_factor=factor, size=size) + + def downsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map down-sampling. """ + return self.interpolate(x, scale_factor=1.0/factor, size=size) + + def interpolate(self, x, scale_factor: float, size: Tuple[int, int]): + """ Feature map interpolation. """ + B, N, C = x.shape + H, W = size + _assert(N == 1 + H * W, '') + + cls_token = x[:, :1, :] + img_tokens = x[:, 1:, :] + + img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W) + img_tokens = F.interpolate( + img_tokens, + scale_factor=scale_factor, + recompute_scale_factor=False, + mode='bilinear', + align_corners=False, + ) + img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2) + + out = torch.cat((cls_token, img_tokens), dim=1) + + return out + + def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]): + _, S2, S3, S4 = sizes + cur2 = self.norm12(x2) + cur3 = self.norm13(x3) + cur4 = self.norm14(x4) + cur2 = self.factoratt_crpe2(cur2, size=S2) + cur3 = self.factoratt_crpe3(cur3, size=S3) + cur4 = self.factoratt_crpe4(cur4, size=S4) + upsample3_2 = self.upsample(cur3, factor=2., size=S3) + upsample4_3 = self.upsample(cur4, factor=2., size=S4) + upsample4_2 = self.upsample(cur4, factor=4., size=S4) + downsample2_3 = self.downsample(cur2, factor=2., size=S2) + downsample3_4 = self.downsample(cur3, factor=2., size=S3) + downsample2_4 = self.downsample(cur2, factor=4., size=S2) + cur2 = cur2 + upsample3_2 + upsample4_2 + cur3 = cur3 + upsample4_3 + downsample2_3 + cur4 = cur4 + downsample3_4 + downsample2_4 + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + # MLP. + cur2 = self.norm22(x2) + cur3 = self.norm23(x3) + cur4 = self.norm24(x4) + cur2 = self.mlp2(cur2) + cur3 = self.mlp3(cur3) + cur4 = self.mlp4(cur4) + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + return x1, x2, x3, x4 + + +class CoaT(nn.Module): + """ CoaT class. 
""" + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dims=(64, 128, 320, 512), + serial_depths=(3, 4, 6, 3), + parallel_depth=0, + num_heads=8, + mlp_ratios=(4, 4, 4, 4), + qkv_bias=True, + drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=LayerNorm, + return_interm_layers=False, + out_features=None, + crpe_window=None, + global_pool='token', + ): + super().__init__() + assert global_pool in ('token', 'avg') + crpe_window = crpe_window or {3: 2, 5: 3, 7: 3} + self.return_interm_layers = return_interm_layers + self.out_features = out_features + self.embed_dims = embed_dims + self.num_features = self.head_hidden_size = embed_dims[-1] + self.num_classes = num_classes + self.global_pool = global_pool + + # Patch embeddings. + img_size = to_2tuple(img_size) + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dims[0], norm_layer=nn.LayerNorm) + self.patch_embed2 = PatchEmbed( + img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0], + embed_dim=embed_dims[1], norm_layer=nn.LayerNorm) + self.patch_embed3 = PatchEmbed( + img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1], + embed_dim=embed_dims[2], norm_layer=nn.LayerNorm) + self.patch_embed4 = PatchEmbed( + img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2], + embed_dim=embed_dims[3], norm_layer=nn.LayerNorm) + + # Class tokens. + self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0])) + self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1])) + self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2])) + self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3])) + + # Convolutional position encodings. + self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3) + self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3) + self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3) + self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3) + + # Convolutional relative position encodings. + self.crpe1 = ConvRelPosEnc(head_chs=embed_dims[0] // num_heads, num_heads=num_heads, window=crpe_window) + self.crpe2 = ConvRelPosEnc(head_chs=embed_dims[1] // num_heads, num_heads=num_heads, window=crpe_window) + self.crpe3 = ConvRelPosEnc(head_chs=embed_dims[2] // num_heads, num_heads=num_heads, window=crpe_window) + self.crpe4 = ConvRelPosEnc(head_chs=embed_dims[3] // num_heads, num_heads=num_heads, window=crpe_window) + + # Disable stochastic depth. + dpr = drop_path_rate + assert dpr == 0.0 + skwargs = dict( + num_heads=num_heads, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr, + norm_layer=norm_layer, + ) + + # Serial blocks 1. + self.serial_blocks1 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[0], + mlp_ratio=mlp_ratios[0], + shared_cpe=self.cpe1, + shared_crpe=self.crpe1, + **skwargs, + ) + for _ in range(serial_depths[0])] + ) + + # Serial blocks 2. + self.serial_blocks2 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[1], + mlp_ratio=mlp_ratios[1], + shared_cpe=self.cpe2, + shared_crpe=self.crpe2, + **skwargs, + ) + for _ in range(serial_depths[1])] + ) + + # Serial blocks 3. + self.serial_blocks3 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[2], + mlp_ratio=mlp_ratios[2], + shared_cpe=self.cpe3, + shared_crpe=self.crpe3, + **skwargs, + ) + for _ in range(serial_depths[2])] + ) + + # Serial blocks 4. 
+ self.serial_blocks4 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[3], + mlp_ratio=mlp_ratios[3], + shared_cpe=self.cpe4, + shared_crpe=self.crpe4, + **skwargs, + ) + for _ in range(serial_depths[3])] + ) + + # Parallel blocks. + self.parallel_depth = parallel_depth + if self.parallel_depth > 0: + self.parallel_blocks = nn.ModuleList([ + ParallelBlock( + dims=embed_dims, + mlp_ratios=mlp_ratios, + shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4), + **skwargs, + ) + for _ in range(parallel_depth)] + ) + else: + self.parallel_blocks = None + + # Classification head(s). + if not self.return_interm_layers: + if self.parallel_blocks is not None: + self.norm2 = norm_layer(embed_dims[1]) + self.norm3 = norm_layer(embed_dims[2]) + else: + self.norm2 = self.norm3 = None + self.norm4 = norm_layer(embed_dims[3]) + + if self.parallel_depth > 0: + # CoaT series: Aggregate features of last three scales for classification. + assert embed_dims[1] == embed_dims[2] == embed_dims[3] + self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1) + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + else: + # CoaT-Lite series: Use feature of last scale for classification. + self.aggregate = None + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Initialize weights. + trunc_normal_(self.cls_token1, std=.02) + trunc_normal_(self.cls_token2, std=.02) + trunc_normal_(self.cls_token3, std=.02) + trunc_normal_(self.cls_token4, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'} + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem1=r'^cls_token1|patch_embed1|crpe1|cpe1', + serial_blocks1=r'^serial_blocks1\.(\d+)', + stem2=r'^cls_token2|patch_embed2|crpe2|cpe2', + serial_blocks2=r'^serial_blocks2\.(\d+)', + stem3=r'^cls_token3|patch_embed3|crpe3|cpe3', + serial_blocks3=r'^serial_blocks3\.(\d+)', + stem4=r'^cls_token4|patch_embed4|crpe4|cpe4', + serial_blocks4=r'^serial_blocks4\.(\d+)', + parallel_blocks=[ # FIXME (partially?) overlap parallel w/ serial blocks?? + (r'^parallel_blocks\.(\d+)', None), + (r'^norm|aggregate', (99999,)), + ] + ) + return matcher + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('token', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x0): + B = x0.shape[0] + + # Serial blocks 1. 
+ x1 = self.patch_embed1(x0) + H1, W1 = self.patch_embed1.grid_size + x1 = insert_cls(x1, self.cls_token1) + for blk in self.serial_blocks1: + x1 = blk(x1, size=(H1, W1)) + x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 2. + x2 = self.patch_embed2(x1_nocls) + H2, W2 = self.patch_embed2.grid_size + x2 = insert_cls(x2, self.cls_token2) + for blk in self.serial_blocks2: + x2 = blk(x2, size=(H2, W2)) + x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 3. + x3 = self.patch_embed3(x2_nocls) + H3, W3 = self.patch_embed3.grid_size + x3 = insert_cls(x3, self.cls_token3) + for blk in self.serial_blocks3: + x3 = blk(x3, size=(H3, W3)) + x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 4. + x4 = self.patch_embed4(x3_nocls) + H4, W4 = self.patch_embed4.grid_size + x4 = insert_cls(x4, self.cls_token4) + for blk in self.serial_blocks4: + x4 = blk(x4, size=(H4, W4)) + x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + + # Only serial blocks: Early return. + if self.parallel_blocks is None: + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). + feat_out = {} + if 'x1_nocls' in self.out_features: + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + # Return features for classification. + x4 = self.norm4(x4) + return x4 + + # Parallel blocks. + for blk in self.parallel_blocks: + x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4)) + x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)]) + + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). 
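# --- NOTE (editor): illustrative usage sketch for the return_interm_layers path above,
# assuming a timm install that includes this file. With out_features set, forward() returns
# a dict of per-stage NCHW feature maps (CLS tokens stripped) for downstream detectors.
import torch
import timm

backbone = timm.create_model(
    'coat_lite_mini', pretrained=False,
    return_interm_layers=True, out_features=['x2_nocls', 'x3_nocls', 'x4_nocls'])
feats = backbone(torch.randn(1, 3, 224, 224))
print({k: tuple(v.shape) for k, v in feats.items()})
# e.g. x2_nocls (1, 128, 28, 28), x3_nocls (1, 320, 14, 14), x4_nocls (1, 512, 7, 7)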
+ feat_out = {} + if 'x1_nocls' in self.out_features: + x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + x2 = self.norm2(x2) + x3 = self.norm3(x3) + x4 = self.norm4(x4) + return [x2, x3, x4] + + def forward_head(self, x_feat: Union[torch.Tensor, List[torch.Tensor]], pre_logits: bool = False): + if isinstance(x_feat, list): + assert self.aggregate is not None + if self.global_pool == 'avg': + x = torch.cat([xl[:, 1:].mean(dim=1, keepdim=True) for xl in x_feat], dim=1) # [B, 3, C] + else: + x = torch.stack([xl[:, 0] for xl in x_feat], dim=1) # [B, 3, C] + x = self.aggregate(x).squeeze(dim=1) # Shape: [B, C] + else: + x = x_feat[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x_feat[:, 0] + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x) -> torch.Tensor: + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features (for down-stream tasks). + return self.forward_features(x) + else: + # Return features for classification. + x_feat = self.forward_features(x) + x = self.forward_head(x_feat) + return x + + +def insert_cls(x, cls_token): + """ Insert CLS token. """ + cls_tokens = cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + return x + + +def remove_cls(x): + """ Remove CLS token. 
""" + return x[:, 1:, :] + + +def checkpoint_filter_fn(state_dict, model): + out_dict = {} + state_dict = state_dict.get('model', state_dict) + for k, v in state_dict.items(): + # original model had unused norm layers, removing them requires filtering pretrained checkpoints + if k.startswith('norm1') or \ + (k.startswith('norm2') and getattr(model, 'norm2', None) is None) or \ + (k.startswith('norm3') and getattr(model, 'norm3', None) is None) or \ + (k.startswith('norm4') and getattr(model, 'norm4', None) is None) or \ + (k.startswith('aggregate') and getattr(model, 'aggregate', None) is None) or \ + (k.startswith('head') and getattr(model, 'head', None) is None): + continue + out_dict[k] = v + return out_dict + + +def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + CoaT, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs, + ) + return model + + +def _cfg_coat(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed1.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'coat_tiny.in1k': _cfg_coat(hf_hub_id='timm/'), + 'coat_mini.in1k': _cfg_coat(hf_hub_id='timm/'), + 'coat_small.in1k': _cfg_coat(hf_hub_id='timm/'), + 'coat_lite_tiny.in1k': _cfg_coat(hf_hub_id='timm/'), + 'coat_lite_mini.in1k': _cfg_coat(hf_hub_id='timm/'), + 'coat_lite_small.in1k': _cfg_coat(hf_hub_id='timm/'), + 'coat_lite_medium.in1k': _cfg_coat(hf_hub_id='timm/'), + 'coat_lite_medium_384.in1k': _cfg_coat( + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0, crop_mode='squash', + ), +}) + + +@register_model +def coat_tiny(pretrained=False, **kwargs) -> CoaT: + model_cfg = dict( + patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6) + model = _create_coat('coat_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def coat_mini(pretrained=False, **kwargs) -> CoaT: + model_cfg = dict( + patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6) + model = _create_coat('coat_mini', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def coat_small(pretrained=False, **kwargs) -> CoaT: + model_cfg = dict( + patch_size=4, embed_dims=[152, 320, 320, 320], serial_depths=[2, 2, 2, 2], parallel_depth=6, **kwargs) + model = _create_coat('coat_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def coat_lite_tiny(pretrained=False, **kwargs) -> CoaT: + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4]) + model = _create_coat('coat_lite_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def coat_lite_mini(pretrained=False, **kwargs) -> CoaT: + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4]) + model = _create_coat('coat_lite_mini', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def coat_lite_small(pretrained=False, **kwargs) -> CoaT: + model_cfg = 
dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], mlp_ratios=[8, 8, 4, 4]) + model = _create_coat('coat_lite_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def coat_lite_medium(pretrained=False, **kwargs) -> CoaT: + model_cfg = dict( + patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8]) + model = _create_coat('coat_lite_medium', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def coat_lite_medium_384(pretrained=False, **kwargs) -> CoaT: + model_cfg = dict( + img_size=384, patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8]) + model = _create_coat('coat_lite_medium_384', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model \ No newline at end of file diff --git a/pytorch-image-models/timm/models/convit.py b/pytorch-image-models/timm/models/convit.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe3b51ecea5180d12fa437e07767d0f2ea21577 --- /dev/null +++ b/pytorch-image-models/timm/models/convit.py @@ -0,0 +1,428 @@ +""" ConViT Model + +@article{d2021convit, + title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases}, + author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent}, + journal={arXiv preprint arXiv:2103.10697}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.10697 +Original code: https://github.com/facebookresearch/convit, original copyright below + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the CC-by-NC license found in the +# LICENSE file in the root directory of this source tree. 
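# --- NOTE (editor): illustrative sketch, assuming a timm install that includes these files.
# Every entrypoint in this patch is registered via @register_model plus generate_default_cfgs,
# so the variants resolve through the ordinary timm factory:
import timm

print(timm.list_models('convit*'))      # e.g. ['convit_base', 'convit_small', 'convit_tiny']
model = timm.create_model('convit_tiny', pretrained=False, num_classes=10)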
+# +'''These modules are adapted from those of timm, see +https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +''' +from typing import Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, trunc_normal_, PatchEmbed, Mlp, LayerNorm, HybridEmbed +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_module +from ._registry import register_model, generate_default_cfgs + + +__all__ = ['ConVit'] + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class GPSA(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + locality_strength=1., + ): + super().__init__() + self.num_heads = num_heads + self.dim = dim + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.locality_strength = locality_strength + + self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.pos_proj = nn.Linear(3, num_heads) + self.proj_drop = nn.Dropout(proj_drop) + self.gating_param = nn.Parameter(torch.ones(self.num_heads)) + self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None + + def forward(self, x): + B, N, C = x.shape + if self.rel_indices is None or self.rel_indices.shape[1] != N: + self.rel_indices = self.get_rel_indices(N) + attn = self.get_attention(x) + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def get_attention(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k = qk[0], qk[1] + pos_score = self.rel_indices.expand(B, -1, -1, -1) + pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) + patch_score = (q @ k.transpose(-2, -1)) * self.scale + patch_score = patch_score.softmax(dim=-1) + pos_score = pos_score.softmax(dim=-1) + + gating = self.gating_param.view(1, -1, 1, 1) + attn = (1. 
- torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score + attn /= attn.sum(dim=-1).unsqueeze(-1) + attn = self.attn_drop(attn) + return attn + + def get_attention_map(self, x, return_map=False): + attn_map = self.get_attention(x).mean(0) # average over batch + distances = self.rel_indices.squeeze()[:, :, -1] ** .5 + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) + if return_map: + return dist, attn_map + else: + return dist + + def local_init(self): + self.v.weight.data.copy_(torch.eye(self.dim)) + locality_distance = 1 # max(1,1/locality_strength**.5) + + kernel_size = int(self.num_heads ** .5) + center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 + for h1 in range(kernel_size): + for h2 in range(kernel_size): + position = h1 + kernel_size * h2 + self.pos_proj.weight.data[position, 2] = -1 + self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance + self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance + self.pos_proj.weight.data *= self.locality_strength + + def get_rel_indices(self, num_patches: int) -> torch.Tensor: + img_size = int(num_patches ** .5) + rel_indices = torch.zeros(1, num_patches, num_patches, 3) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + rel_indices[:, :, :, 2] = indd.unsqueeze(0) + rel_indices[:, :, :, 1] = indy.unsqueeze(0) + rel_indices[:, :, :, 0] = indx.unsqueeze(0) + device = self.qk.weight.device + return rel_indices.to(device) + + +class MHSA(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def get_attention_map(self, x, return_map=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + attn_map = (q @ k.transpose(-2, -1)) * self.scale + attn_map = attn_map.softmax(dim=-1).mean(0) + + img_size = int(N ** .5) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + distances = indd ** .5 + distances = distances.to(x.device) + + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N + if return_map: + return dist, attn_map + else: + return dist + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=LayerNorm, + use_gpsa=True, + locality_strength=1., + ): + super().__init__() + self.norm1 = norm_layer(dim) + 
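# --- NOTE (editor): minimal sketch of the GPSA gating above: a per-head sigmoid gate blends
# the content-based attention map with the learned positional attention map, and the blend
# is re-normalised so each row still sums to 1. gating_param is initialised to 1, so the
# positional map receives sigmoid(1) ~ 0.73 of the weight at the start of training.
import torch

patch_score = torch.softmax(torch.randn(1, 4, 196, 196), dim=-1)   # content attention per head
pos_score = torch.softmax(torch.randn(1, 4, 196, 196), dim=-1)     # positional attention per head
gating = torch.ones(4).view(1, -1, 1, 1)
attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn = attn / attn.sum(dim=-1, keepdim=True)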
self.use_gpsa = use_gpsa + if self.use_gpsa: + self.attn = GPSA( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + locality_strength=locality_strength, + ) + else: + self.attn = MHSA( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=proj_drop, + ) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConVit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='token', + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + drop_rate=0., + pos_drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + hybrid_backbone=None, + norm_layer=LayerNorm, + local_up_to_layer=3, + locality_strength=1., + use_pos_embed=True, + ): + super().__init__() + assert global_pool in ('', 'avg', 'token') + embed_dim *= num_heads + self.num_classes = num_classes + self.global_pool = global_pool + self.local_up_to_layer = local_up_to_layer + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.locality_strength = locality_strength + self.use_pos_embed = use_pos_embed + + if hybrid_backbone is not None: + self.patch_embed = HybridEmbed( + hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) + else: + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + ) + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_drop = nn.Dropout(p=pos_drop_rate) + + if self.use_pos_embed: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.pos_embed, std=.02) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + use_gpsa=i < local_up_to_layer, + locality_strength=locality_strength, + ) for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Classifier head + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + for n, m in self.named_modules(): + if hasattr(m, 'local_init'): + m.local_init() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + 
return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'token', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.use_pos_embed: + x = x + self.pos_embed + x = self.pos_drop(x) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + for u, blk in enumerate(self.blocks): + if u == self.local_up_to_layer: + x = torch.cat((cls_tokens, x), dim=1) + x = blk(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_convit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + return build_model_with_cfg(ConVit, variant, pretrained, **kwargs) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # ConViT + 'convit_tiny.fb_in1k': _cfg(hf_hub_id='timm/'), + 'convit_small.fb_in1k': _cfg(hf_hub_id='timm/'), + 'convit_base.fb_in1k': _cfg(hf_hub_id='timm/') +}) + + +@register_model +def convit_tiny(pretrained=False, **kwargs) -> ConVit: + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=4) + model = _create_convit(variant='convit_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convit_small(pretrained=False, **kwargs) -> ConVit: + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=9) + model = _create_convit(variant='convit_small', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convit_base(pretrained=False, **kwargs) -> ConVit: + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=16) + model = _create_convit(variant='convit_base', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/convmixer.py b/pytorch-image-models/timm/models/convmixer.py new file mode 100644 index 0000000000000000000000000000000000000000..c7a250776ac67deda4f3c745b2d3544ccb20e352 --- /dev/null +++ b/pytorch-image-models/timm/models/convmixer.py @@ -0,0 +1,146 @@ +""" ConvMixer + +""" +from typing import Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SelectAdaptivePool2d +from ._registry import register_model, generate_default_cfgs +from ._builder import 
build_model_with_cfg +from ._manipulate import checkpoint_seq + +__all__ = ['ConvMixer'] + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x): + return self.fn(x) + x + + +class ConvMixer(nn.Module): + def __init__( + self, + dim, + depth, + kernel_size=9, + patch_size=7, + in_chans=3, + num_classes=1000, + global_pool='avg', + drop_rate=0., + act_layer=nn.GELU, + **kwargs, + ): + super().__init__() + self.num_classes = num_classes + self.num_features = self.head_hidden_size = dim + self.grad_checkpointing = False + + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size), + act_layer(), + nn.BatchNorm2d(dim) + ) + self.blocks = nn.Sequential( + *[nn.Sequential( + Residual(nn.Sequential( + nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"), + act_layer(), + nn.BatchNorm2d(dim) + )), + nn.Conv2d(dim, dim, kernel_size=1), + act_layer(), + nn.BatchNorm2d(dim) + ) for i in range(depth)] + ) + self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^stem', blocks=r'^blocks\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.pooling(x) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_convmixer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for ConvMixer models.') + + return build_model_with_cfg(ConvMixer, variant, pretrained, **kwargs) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .96, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', + 'first_conv': 'stem.0', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'convmixer_1536_20.in1k': _cfg(hf_hub_id='timm/'), + 'convmixer_768_32.in1k': _cfg(hf_hub_id='timm/'), + 'convmixer_1024_20_ks9_p14.in1k': _cfg(hf_hub_id='timm/') +}) + + + +@register_model +def convmixer_1536_20(pretrained=False, **kwargs) -> ConvMixer: + model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs) + return _create_convmixer('convmixer_1536_20', pretrained, **model_args) + + +@register_model +def convmixer_768_32(pretrained=False, **kwargs) -> ConvMixer: + model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, act_layer=nn.ReLU, **kwargs) + return _create_convmixer('convmixer_768_32', pretrained, 
**model_args) + + +@register_model +def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs) -> ConvMixer: + model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs) + return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/convnext.py b/pytorch-image-models/timm/models/convnext.py new file mode 100644 index 0000000000000000000000000000000000000000..e682379f64db0a2326343283bcc9486ba371fa48 --- /dev/null +++ b/pytorch-image-models/timm/models/convnext.py @@ -0,0 +1,1241 @@ +""" ConvNeXt + +Papers: +* `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf +@Article{liu2022convnet, + author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, + title = {A ConvNet for the 2020s}, + journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2022}, +} + +* `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 +@article{Woo2023ConvNeXtV2, + title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders}, + author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie}, + year={2023}, + journal={arXiv preprint arXiv:2301.00808}, +} + +Original code and weights from: +* https://github.com/facebookresearch/ConvNeXt, original copyright below +* https://github.com/facebookresearch/ConvNeXt-V2, original copyright below + +Model defs atto, femto, pico, nano and _ols / _hnf variants are timm originals. + +Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman +""" +# ConvNeXt +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# This source code is licensed under the MIT license + +# ConvNeXt-V2 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree (Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)) +# No code was used directly from ConvNeXt-V2, however the weights are CC BY-NC 4.0 so beware if using commercially. 
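# --- NOTE (editor): illustrative sketch for the ConvMixer class in convmixer.py above, not
# part of the patch. The model is isotropic: a patch-embedding stem, then `depth` blocks of a
# residual depthwise conv (spatial mixing) followed by a pointwise 1x1 conv (channel mixing),
# then global average pooling into the classifier. The module path assumes this diff is applied.
import torch
from timm.models.convmixer import ConvMixer

tiny = ConvMixer(dim=64, depth=4, kernel_size=9, patch_size=7, num_classes=10)
logits = tiny(torch.randn(1, 3, 224, 224))    # -> [1, 10]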
+ +from functools import partial +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD +from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, \ + LayerNorm2d, LayerNorm, RmsNorm2d, RmsNorm, create_conv2d, get_act_layer, get_norm_layer, make_divisible, to_ntuple +from timm.layers import NormMlpClassifierHead, ClassifierHead +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import named_apply, checkpoint_seq +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this + + +class Downsample(nn.Module): + + def __init__(self, in_chs, out_chs, stride=1, dilation=1): + super().__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + + if in_chs != out_chs: + self.conv = create_conv2d(in_chs, out_chs, 1, stride=1) + else: + self.conv = nn.Identity() + + def forward(self, x): + x = self.pool(x) + x = self.conv(x) + return x + + +class ConvNeXtBlock(nn.Module): + """ ConvNeXt Block + There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + + Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate + choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear + is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW. + """ + + def __init__( + self, + in_chs: int, + out_chs: Optional[int] = None, + kernel_size: int = 7, + stride: int = 1, + dilation: Union[int, Tuple[int, int]] = (1, 1), + mlp_ratio: float = 4, + conv_mlp: bool = False, + conv_bias: bool = True, + use_grn: bool = False, + ls_init_value: Optional[float] = 1e-6, + act_layer: Union[str, Callable] = 'gelu', + norm_layer: Optional[Callable] = None, + drop_path: float = 0., + ): + """ + + Args: + in_chs: Block input channels. + out_chs: Block output channels (same as in_chs if None). + kernel_size: Depthwise convolution kernel size. + stride: Stride of depthwise convolution. + dilation: Tuple specifying input and output dilation of block. + mlp_ratio: MLP expansion ratio. + conv_mlp: Use 1x1 convolutions for MLP and a NCHW compatible norm layer if True. + conv_bias: Apply bias for all convolution (linear) layers. + use_grn: Use GlobalResponseNorm in MLP (from ConvNeXt-V2) + ls_init_value: Layer-scale init values, layer-scale applied if not None. + act_layer: Activation layer. + norm_layer: Normalization layer (defaults to LN if not specified). + drop_path: Stochastic depth probability. 
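+ Note: conv_mlp=True keeps the whole block in NCHW (1x1 convs with a channels-first norm such as LayerNorm2d), while conv_mlp=False permutes to NHWC around the norm/MLP so nn.Linear and LayerNorm are used; per the class docstring above, the latter tends to be the better choice as model size grows.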
+ """ + super().__init__() + out_chs = out_chs or in_chs + dilation = to_ntuple(2)(dilation) + act_layer = get_act_layer(act_layer) + if not norm_layer: + norm_layer = LayerNorm2d if conv_mlp else LayerNorm + mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp) + self.use_conv_mlp = conv_mlp + self.conv_dw = create_conv2d( + in_chs, + out_chs, + kernel_size=kernel_size, + stride=stride, + dilation=dilation[0], + depthwise=True, + bias=conv_bias, + ) + self.norm = norm_layer(out_chs) + self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer) + self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0]) + else: + self.shortcut = nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.conv_dw(x) + if self.use_conv_mlp: + x = self.norm(x) + x = self.mlp(x) + else: + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + x = self.mlp(x) + x = x.permute(0, 3, 1, 2) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + + x = self.drop_path(x) + self.shortcut(shortcut) + return x + + +class ConvNeXtStage(nn.Module): + + def __init__( + self, + in_chs, + out_chs, + kernel_size=7, + stride=2, + depth=2, + dilation=(1, 1), + drop_path_rates=None, + ls_init_value=1.0, + conv_mlp=False, + conv_bias=True, + use_grn=False, + act_layer='gelu', + norm_layer=None, + norm_layer_cl=None + ): + super().__init__() + self.grad_checkpointing = False + + if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]: + ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1 + pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used + self.downsample = nn.Sequential( + norm_layer(in_chs), + create_conv2d( + in_chs, + out_chs, + kernel_size=ds_ks, + stride=stride, + dilation=dilation[0], + padding=pad, + bias=conv_bias, + ), + ) + in_chs = out_chs + else: + self.downsample = nn.Identity() + + drop_path_rates = drop_path_rates or [0.] * depth + stage_blocks = [] + for i in range(depth): + stage_blocks.append(ConvNeXtBlock( + in_chs=in_chs, + out_chs=out_chs, + kernel_size=kernel_size, + dilation=dilation[1], + drop_path=drop_path_rates[i], + ls_init_value=ls_init_value, + conv_mlp=conv_mlp, + conv_bias=conv_bias, + use_grn=use_grn, + act_layer=act_layer, + norm_layer=norm_layer if conv_mlp else norm_layer_cl, + )) + in_chs = out_chs + self.blocks = nn.Sequential(*stage_blocks) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class ConvNeXt(nn.Module): + r""" ConvNeXt + A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf + """ + + def __init__( + self, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + output_stride: int = 32, + depths: Tuple[int, ...] = (3, 3, 9, 3), + dims: Tuple[int, ...] 
= (96, 192, 384, 768), + kernel_sizes: Union[int, Tuple[int, ...]] = 7, + ls_init_value: Optional[float] = 1e-6, + stem_type: str = 'patch', + patch_size: int = 4, + head_init_scale: float = 1., + head_norm_first: bool = False, + head_hidden_size: Optional[int] = None, + conv_mlp: bool = False, + conv_bias: bool = True, + use_grn: bool = False, + act_layer: Union[str, Callable] = 'gelu', + norm_layer: Optional[Union[str, Callable]] = None, + norm_eps: Optional[float] = None, + drop_rate: float = 0., + drop_path_rate: float = 0., + ): + """ + Args: + in_chans: Number of input image channels. + num_classes: Number of classes for classification head. + global_pool: Global pooling type. + output_stride: Output stride of network, one of (8, 16, 32). + depths: Number of blocks at each stage. + dims: Feature dimension at each stage. + kernel_sizes: Depthwise convolution kernel-sizes for each stage. + ls_init_value: Init value for Layer Scale, disabled if None. + stem_type: Type of stem. + patch_size: Stem patch size for patch stem. + head_init_scale: Init scaling value for classifier weights and biases. + head_norm_first: Apply normalization before global pool + head. + head_hidden_size: Size of MLP hidden layer in head if not None and head_norm_first == False. + conv_mlp: Use 1x1 conv in MLP, improves speed for small networks w/ chan last. + conv_bias: Use bias layers w/ all convolutions. + use_grn: Use Global Response Norm (ConvNeXt-V2) in MLP. + act_layer: Activation layer type. + norm_layer: Normalization layer type. + drop_rate: Head pre-classifier dropout rate. + drop_path_rate: Stochastic depth drop rate. + """ + super().__init__() + assert output_stride in (8, 16, 32) + kernel_sizes = to_ntuple(4)(kernel_sizes) + use_rms = isinstance(norm_layer, str) and norm_layer.startswith('rmsnorm') + if norm_layer is None or use_rms: + norm_layer = RmsNorm2d if use_rms else LayerNorm2d + norm_layer_cl = norm_layer if conv_mlp else (RmsNorm if use_rms else LayerNorm) + if norm_eps is not None: + norm_layer = partial(norm_layer, eps=norm_eps) + norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) + else: + assert conv_mlp,\ + 'If a norm_layer is specified, conv MLP must be used so all norm expect rank-4, channels-first input' + norm_layer = get_norm_layer(norm_layer) + norm_layer_cl = norm_layer + if norm_eps is not None: + norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) + act_layer = get_act_layer(act_layer) + + self.num_classes = num_classes + self.drop_rate = drop_rate + self.feature_info = [] + + assert stem_type in ('patch', 'overlap', 'overlap_tiered', 'overlap_act') + if stem_type == 'patch': + # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias), + norm_layer(dims[0]), + ) + stem_stride = patch_size + else: + mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0] + self.stem = nn.Sequential(*filter(None, [ + nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), + act_layer() if 'act' in stem_type else None, + nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias), + norm_layer(dims[0]), + ])) + stem_stride = 4 + + self.stages = nn.Sequential() + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + stages = [] + prev_chs = dims[0] + curr_stride = stem_stride + dilation = 1 + # 4 feature resolution stages, each 
consisting of multiple residual blocks + for i in range(4): + stride = 2 if curr_stride == 2 or i > 0 else 1 + if curr_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + curr_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + out_chs = dims[i] + stages.append(ConvNeXtStage( + prev_chs, + out_chs, + kernel_size=kernel_sizes[i], + stride=stride, + dilation=(first_dilation, dilation), + depth=depths[i], + drop_path_rates=dp_rates[i], + ls_init_value=ls_init_value, + conv_mlp=conv_mlp, + conv_bias=conv_bias, + use_grn=use_grn, + act_layer=act_layer, + norm_layer=norm_layer, + norm_layer_cl=norm_layer_cl, + )) + prev_chs = out_chs + # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + self.num_features = self.head_hidden_size = prev_chs + + # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets + # otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights) + if head_norm_first: + assert not head_hidden_size + self.norm_pre = norm_layer(self.num_features) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + ) + else: + self.norm_pre = nn.Identity() + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + hidden_size=head_hidden_size, + pool_type=global_pool, + drop_rate=self.drop_rate, + norm_layer=norm_layer, + act_layer='gelu', + ) + self.head_hidden_size = self.head.num_features + named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.downsample', (0,)), # blocks + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^norm_pre', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
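+ # NOTE: indices are resolved by the feature_take_indices() call below: an int n takes the last n feature points, a sequence selects specific ones, with the stem output counted as index 0 and the four stages as indices 1-4.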
+ intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices) + + # forward pass + feat_idx = 0 # stem is index 0 + x = self.stem(x) + if feat_idx in take_indices: + intermediates.append(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.stages + else: + stages = self.stages[:max_index] + for stage in stages: + feat_idx += 1 + x = stage(x) + if feat_idx in take_indices: + # NOTE not bothering to apply norm_pre when norm=True as almost no models have it enabled + intermediates.append(x) + + if intermediates_only: + return intermediates + + x = self.norm_pre(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices) + self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0 + if prune_norm: + self.norm_pre = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm_pre(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name=None, head_init_scale=1.0): + if isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=.02) + nn.init.zeros_(module.bias) + if name and 'head.' 
in name: + module.weight.data.mul_(head_init_scale) + module.bias.data.mul_(head_init_scale) + + +def checkpoint_filter_fn(state_dict, model): + """ Remap FB checkpoints -> timm """ + if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: + return state_dict # non-FB checkpoint + if 'model' in state_dict: + state_dict = state_dict['model'] + + out_dict = {} + if 'visual.trunk.stem.0.weight' in state_dict: + out_dict = {k.replace('visual.trunk.', ''): v for k, v in state_dict.items() if k.startswith('visual.trunk.')} + if 'visual.head.proj.weight' in state_dict: + out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight'] + out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0]) + elif 'visual.head.mlp.fc1.weight' in state_dict: + out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight'] + out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias'] + out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight'] + out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0]) + return out_dict + + import re + for k, v in state_dict.items(): + k = k.replace('downsample_layers.0.', 'stem.') + k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) + k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) + k = k.replace('dwconv', 'conv_dw') + k = k.replace('pwconv', 'mlp.fc') + if 'grn' in k: + k = k.replace('grn.beta', 'mlp.grn.bias') + k = k.replace('grn.gamma', 'mlp.grn.weight') + v = v.reshape(v.shape[-1]) + k = k.replace('head.', 'head.fc.') + if k.startswith('norm.'): + k = k.replace('norm', 'head.norm') + if v.ndim == 2 and 'head' not in k: + model_shape = model.state_dict()[k].shape + v = v.reshape(model_shape) + out_dict[k] = v + + return out_dict + + +def _create_convnext(variant, pretrained=False, **kwargs): + if kwargs.get('pretrained_cfg', '') == 'fcmae': + # NOTE fcmae pretrained weights have no classifier or final norm-layer (`head.norm`) + # This is workaround loading with num_classes=0 w/o removing norm-layer. 
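+ # Setting pretrained_strict=False relaxes the state_dict load so the classifier / `head.norm` keys missing from fcmae checkpoints are tolerated and simply left at their initialized values.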
+ kwargs.setdefault('pretrained_strict', False) + + model = build_model_with_cfg( + ConvNeXt, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head.fc', + **kwargs + } + + +def _cfgv2(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head.fc', + 'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808', + 'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders', + 'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # timm specific variants + 'convnext_tiny.in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_small.in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'convnext_zepto_rms.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), + 'convnext_zepto_rms_ols.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + crop_pct=0.9), + 'convnext_atto.d2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnext_atto_ols.a2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnext_atto_rms.untrained': _cfg( + #hf_hub_id='timm/', + test_input_size=(3, 256, 256), test_crop_pct=0.95), + 'convnext_femto.d1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnext_femto_ols.d1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnext_pico.d1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnext_pico_ols.d1_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth', + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_nano.in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_nano.d1h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth', + hf_hub_id='timm/', + crop_pct=0.95, 
test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_nano_ols.d1h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth', + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_tiny_hnf.a2h_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth', + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'convnext_tiny.in12k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnext_small.in12k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + 'convnext_nano.in12k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, num_classes=11821), + 'convnext_tiny.in12k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, num_classes=11821), + 'convnext_small.in12k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, num_classes=11821), + + 'convnext_tiny.fb_in22k_ft_in1k': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_small.fb_in22k_ft_in1k': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_base.fb_in22k_ft_in1k': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_large.fb_in22k_ft_in1k': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_xlarge.fb_in22k_ft_in1k': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'convnext_tiny.fb_in1k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_small.fb_in1k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_base.fb_in1k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnext_large.fb_in1k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'convnext_tiny.fb_in22k_ft_in1k_384': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnext_small.fb_in22k_ft_in1k_384': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnext_base.fb_in22k_ft_in1k_384': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 
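+ # NOTE: the *_384 cfgs pin evaluation at 384x384 (input_size/pool_size raised accordingly); with crop_pct=1.0, crop_mode='squash' effectively resizes straight to the target size rather than taking an aspect-preserving center crop.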
'convnext_large.fb_in22k_ft_in1k_384': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + 'convnext_tiny.fb_in22k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth", + hf_hub_id='timm/', + num_classes=21841), + 'convnext_small.fb_in22k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth", + hf_hub_id='timm/', + num_classes=21841), + 'convnext_base.fb_in22k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", + hf_hub_id='timm/', + num_classes=21841), + 'convnext_large.fb_in22k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", + hf_hub_id='timm/', + num_classes=21841), + 'convnext_xlarge.fb_in22k': _cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", + hf_hub_id='timm/', + num_classes=21841), + + 'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt', + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt", + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_base.fcmae_ft_in22k_in1k_384': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt", + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt", + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt", + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt", + 
hf_hub_id='timm/', + input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'), + + 'convnextv2_atto.fcmae_ft_in1k': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnextv2_femto.fcmae_ft_in1k': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnextv2_pico.fcmae_ft_in1k': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'convnextv2_nano.fcmae_ft_in1k': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt', + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_tiny.fcmae_ft_in1k': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_base.fcmae_ft_in1k': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_large.fcmae_ft_in1k': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'convnextv2_huge.fcmae_ft_in1k': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt", + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'convnextv2_atto.fcmae': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt', + hf_hub_id='timm/', + num_classes=0), + 'convnextv2_femto.fcmae': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt', + hf_hub_id='timm/', + num_classes=0), + 'convnextv2_pico.fcmae': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt', + hf_hub_id='timm/', + num_classes=0), + 'convnextv2_nano.fcmae': _cfgv2( + url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt', + hf_hub_id='timm/', + num_classes=0), + 'convnextv2_tiny.fcmae': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt", + hf_hub_id='timm/', + num_classes=0), + 'convnextv2_base.fcmae': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt", + hf_hub_id='timm/', + num_classes=0), + 'convnextv2_large.fcmae': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt", + hf_hub_id='timm/', + num_classes=0), + 'convnextv2_huge.fcmae': _cfgv2( + url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt", + hf_hub_id='timm/', + num_classes=0), + + 'convnextv2_small.untrained': _cfg(), + + # CLIP weights, fine-tuned on in1k or in12k + in1k + 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), + 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg( + 
hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), + 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + 'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), + 'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0 + ), + 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash' + ), + 'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), + + 'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), + 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), + 'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'convnext_xxlarge.clip_laion2b_soup_ft_in12k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), + + # CLIP original image tower weights + 'convnext_base.clip_laion2b': _cfg( + hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), + 'convnext_base.clip_laion2b_augreg': _cfg( + hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), + 'convnext_base.clip_laiona': _cfg( + hf_hub_id='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), + 'convnext_base.clip_laiona_320': _cfg( + 
hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), + 'convnext_base.clip_laiona_augreg_320': _cfg( + hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), + 'convnext_large_mlp.clip_laion2b_augreg': _cfg( + hf_hub_id='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=768), + 'convnext_large_mlp.clip_laion2b_ft_320': _cfg( + hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), + 'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg( + hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), + 'convnext_xxlarge.clip_laion2b_soup': _cfg( + hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), + 'convnext_xxlarge.clip_laion2b_rewind': _cfg( + hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), + + "test_convnext.r160_in1k": _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), + "test_convnext2.r160_in1k": _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), + "test_convnext3.r160_in1k": _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), + +}) + + +@register_model +def convnext_zepto_rms(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M + model_args = dict(depths=(2, 2, 4, 2), dims=(32, 64, 128, 256), conv_mlp=True, norm_layer='rmsnorm2d') + model = _create_convnext('convnext_zepto_rms', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_zepto_rms_ols(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M + model_args = dict( + depths=(2, 2, 4, 2), dims=(32, 64, 128, 256), conv_mlp=True, norm_layer='rmsnorm2d', stem_type='overlap_act') + model = _create_convnext('convnext_zepto_rms_ols', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 
3.7M + model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True) + model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant with overlapping 3x3 conv stem, wider than non-ols femto above, current param count 3.7M + model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered') + model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_atto_rms(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M + model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, norm_layer='rmsnorm2d') + model = _create_convnext('convnext_atto_rms', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant + model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True) + model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant + model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered') + model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt: + # timm pico variant + model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True) + model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt: + # timm nano variant with overlapping 3x3 conv stem + model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered') + model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt: + # timm nano variant with standard stem and head + model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True) + model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt: + # experimental nano variant with overlapping conv stem + model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap') + model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt: + # experimental tiny variant with norm before pooling in head (head norm first) + model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True) + model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768)) + model = 
_create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_small(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768]) + model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_base(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024]) + model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_large(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536]) + model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536) + model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048]) + model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-5)) + model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M + model_args = dict( + depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True) + model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt: + # timm femto variant + model_args = dict( + depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True) + model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt: + # timm pico variant + model_args = dict( + depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True) + model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt: + # timm nano variant with standard stem and head + model_args = dict( + depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True) + model = _create_convnext('convnextv2_nano', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), use_grn=True, ls_init_value=None) + model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def 
convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None) + model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None) + model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None) + model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None) + model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def test_convnext(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[1, 2, 4, 2], dims=[24, 32, 48, 64], norm_eps=kwargs.pop('norm_eps', 1e-5), act_layer='gelu_tanh') + model = _create_convnext('test_convnext', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def test_convnext2(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict(depths=[1, 1, 1, 1], dims=[32, 64, 96, 128], norm_eps=kwargs.pop('norm_eps', 1e-5), act_layer='gelu_tanh') + model = _create_convnext('test_convnext2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def test_convnext3(pretrained=False, **kwargs) -> ConvNeXt: + model_args = dict( + depths=[1, 1, 1, 1], dims=[32, 64, 96, 128], norm_eps=kwargs.pop('norm_eps', 1e-5), kernel_sizes=(7, 5, 5, 3), act_layer='silu') + model = _create_convnext('test_convnext3', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + + +register_model_deprecations(__name__, { + 'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k', + 'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k', + 'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k', + 'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k', + 'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k', + 'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384', + 'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384', + 'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384', + 'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384', + 'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384', + 'convnext_tiny_in22k': 'convnext_tiny.fb_in22k', + 'convnext_small_in22k': 'convnext_small.fb_in22k', + 'convnext_base_in22k': 'convnext_base.fb_in22k', + 'convnext_large_in22k': 'convnext_large.fb_in22k', + 'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k', +}) diff --git a/pytorch-image-models/timm/models/crossvit.py b/pytorch-image-models/timm/models/crossvit.py new file mode 100644 index 0000000000000000000000000000000000000000..ff78395ecf64df68b4df86b491109e528335a959 --- /dev/null +++ b/pytorch-image-models/timm/models/crossvit.py @@ -0,0 +1,627 @@ +""" CrossViT Model + +@inproceedings{ + chen2021crossvit, + title={{CrossViT: Cross-Attention 
Multi-Scale Vision Transformer for Image Classification}}, + author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda}, + booktitle={International Conference on Computer Vision (ICCV)}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.14899 +Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py + +NOTE: model names have been renamed from originals to represent actual input res all *_224 -> *_240 and *_384 -> *_408 + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" + +# Copyright IBM All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + + +""" +Modifed from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + +""" +from functools import partial +from typing import List, Optional, Tuple + +import torch +import torch.hub +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, to_2tuple, trunc_normal_, _assert +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_function +from ._registry import register_model, generate_default_cfgs +from .vision_transformer import Block + +__all__ = ['CrossVit'] # model_registry will add each entrypoint fn to this + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + if multi_conv: + if patch_size[0] == 12: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), + ) + elif patch_size[0] == 16: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), + ) + else: + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class CrossAttention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = head_dim ** -0.5 + + self.wq = nn.Linear(dim, dim, bias=qkv_bias) + self.wk = nn.Linear(dim, dim, bias=qkv_bias) + self.wv = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = 
nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # B1C -> B1H(C/H) -> BH1(C/H) + q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class CrossAttentionBlock(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = CrossAttention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x))) + return x + + +class MultiScaleBlock(nn.Module): + + def __init__( + self, + dim, + patches, + depth, + num_heads, + mlp_ratio, + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + + num_branches = len(dim) + self.num_branches = num_branches + # different branch could have different embedding size, the first one is the base + self.blocks = nn.ModuleList() + for d in range(num_branches): + tmp = [] + for i in range(depth[d]): + tmp.append(Block( + dim=dim[d], + num_heads=num_heads[d], + mlp_ratio=mlp_ratio[d], + qkv_bias=qkv_bias, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[i], + norm_layer=norm_layer, + )) + if len(tmp) != 0: + self.blocks.append(nn.Sequential(*tmp)) + + if len(self.blocks) == 0: + self.blocks = None + + self.projs = nn.ModuleList() + for d in range(num_branches): + if dim[d] == dim[(d + 1) % num_branches] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])] + self.projs.append(nn.Sequential(*tmp)) + + self.fusion = nn.ModuleList() + for d in range(num_branches): + d_ = (d + 1) % num_branches + nh = num_heads[d_] + if depth[-1] == 0: # backward capability: + self.fusion.append( + CrossAttentionBlock( + dim=dim[d_], + num_heads=nh, + mlp_ratio=mlp_ratio[d], + qkv_bias=qkv_bias, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[-1], + norm_layer=norm_layer, + )) + else: + tmp = [] + for _ in range(depth[-1]): + tmp.append(CrossAttentionBlock( + dim=dim[d_], + num_heads=nh, + mlp_ratio=mlp_ratio[d], + qkv_bias=qkv_bias, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[-1], + norm_layer=norm_layer, + )) + self.fusion.append(nn.Sequential(*tmp)) + + self.revert_projs = nn.ModuleList() + for d in range(num_branches): + if dim[(d + 1) % num_branches] == dim[d] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(), + nn.Linear(dim[(d + 1) % num_branches], dim[d])] + self.revert_projs.append(nn.Sequential(*tmp)) + + def 
forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + + outs_b = [] + for i, block in enumerate(self.blocks): + outs_b.append(block(x[i])) + + # only take the cls token out + proj_cls_token = torch.jit.annotate(List[torch.Tensor], []) + for i, proj in enumerate(self.projs): + proj_cls_token.append(proj(outs_b[i][:, 0:1, ...])) + + # cross attention + outs = [] + for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)): + tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1) + tmp = fusion(tmp) + reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...]) + tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1) + outs.append(tmp) + return outs + + +def _compute_num_patches(img_size, patches): + return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)] + + +@register_notrace_function +def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False): # annotations for torchscript + """ + Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing. + Args: + x (Tensor): input image + ss (tuple[int, int]): height and width to scale to + crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False + Returns: + Tensor: the "scaled" image batch tensor + """ + H, W = x.shape[-2:] + if H != ss[0] or W != ss[1]: + if crop_scale and ss[0] <= H and ss[1] <= W: + cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.)) + x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]] + else: + x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False) + return x + + +class CrossVit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, + img_size=224, + img_scale=(1.0, 1.0), + patch_size=(8, 16), + in_chans=3, + num_classes=1000, + embed_dim=(192, 384), + depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), + num_heads=(6, 12), + mlp_ratio=(2., 2., 4.), + multi_conv=False, + crop_scale=False, + qkv_bias=True, + drop_rate=0., + pos_drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), + global_pool='token', + ): + super().__init__() + assert global_pool in ('token', 'avg') + + self.num_classes = num_classes + self.global_pool = global_pool + self.img_size = to_2tuple(img_size) + img_scale = to_2tuple(img_scale) + self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] + self.crop_scale = crop_scale # crop instead of interpolate for scale + num_patches = _compute_num_patches(self.img_size_scaled, patch_size) + self.num_branches = len(patch_size) + self.embed_dim = embed_dim + self.num_features = self.head_hidden_size = sum(embed_dim) + self.patch_embed = nn.ModuleList() + + # hard-coded for torch jit script + for i in range(self.num_branches): + setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) + setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) + + for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim): + self.patch_embed.append( + PatchEmbed( + img_size=im_s, + patch_size=p, + in_chans=in_chans, + embed_dim=d, + multi_conv=multi_conv, + )) + + self.pos_drop = nn.Dropout(p=pos_drop_rate) + + total_depth = sum([sum(x[-2:]) for x in depth]) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule + dpr_ptr = 0 + self.blocks = 
nn.ModuleList() + for idx, block_cfg in enumerate(depth): + curr_depth = max(block_cfg[:-1]) + block_cfg[-1] + dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] + blk = MultiScaleBlock( + embed_dim, + num_patches, + block_cfg, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr_, + norm_layer=norm_layer, + ) + dpr_ptr += curr_depth + self.blocks.append(blk) + + self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.ModuleList([ + nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() + for i in range(self.num_branches)]) + + for i in range(self.num_branches): + trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02) + trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + out = set() + for i in range(self.num_branches): + out.add(f'cls_token_{i}') + pe = getattr(self, f'pos_embed_{i}', None) + if pe is not None and pe.requires_grad: + out.add(f'pos_embed_{i}') + return out + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('token', 'avg') + self.global_pool = global_pool + self.head = nn.ModuleList([ + nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() + for i in range(self.num_branches) + ]) + + def forward_features(self, x) -> List[torch.Tensor]: + B = x.shape[0] + xs = [] + for i, patch_embed in enumerate(self.patch_embed): + x_ = x + ss = self.img_size_scaled[i] + x_ = scale_image(x_, ss, self.crop_scale) + x_ = patch_embed(x_) + cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script + cls_tokens = cls_tokens.expand(B, -1, -1) + x_ = torch.cat((cls_tokens, x_), dim=1) + pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script + x_ = x_ + pos_embed + x_ = self.pos_drop(x_) + xs.append(x_) + + for i, blk in enumerate(self.blocks): + xs = blk(xs) + + # NOTE: was before branch token section, move to here to assure all branch token are before layer norm + xs = [norm(xs[i]) for i, norm in enumerate(self.norm)] + return xs + + def forward_head(self, xs: List[torch.Tensor], pre_logits: bool = False) -> torch.Tensor: + xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs] + xs = [self.head_drop(x) for x in xs] + if pre_logits or isinstance(self.head[0], nn.Identity): + return torch.cat([x for x in xs], dim=1) + return torch.mean(torch.stack([head(xs[i]) for i, head in enumerate(self.head)], dim=0), dim=0) + + def forward(self, x): + xs = self.forward_features(x) + x = 
self.forward_head(xs) + return x + + +def _create_crossvit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + def pretrained_filter_fn(state_dict): + new_state_dict = {} + for key in state_dict.keys(): + if 'pos_embed' in key or 'cls_token' in key: + new_key = key.replace(".", "_") + else: + new_key = key + new_state_dict[new_key] = state_dict[key] + return new_state_dict + + return build_model_with_cfg( + CrossVit, + variant, + pretrained, + pretrained_filter_fn=pretrained_filter_fn, + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'), + 'classifier': ('head.0', 'head.1'), + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'crossvit_15_240.in1k': _cfg(hf_hub_id='timm/'), + 'crossvit_15_dagger_240.in1k': _cfg( + hf_hub_id='timm/', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_15_dagger_408.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_18_240.in1k': _cfg(hf_hub_id='timm/'), + 'crossvit_18_dagger_240.in1k': _cfg( + hf_hub_id='timm/', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_18_dagger_408.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_9_240.in1k': _cfg(hf_hub_id='timm/'), + 'crossvit_9_dagger_240.in1k': _cfg( + hf_hub_id='timm/', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_base_240.in1k': _cfg(hf_hub_id='timm/'), + 'crossvit_small_240.in1k': _cfg(hf_hub_id='timm/'), + 'crossvit_tiny_240.in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def crossvit_tiny_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[3, 3], mlp_ratio=[4, 4, 1]) + model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_small_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[6, 6], mlp_ratio=[4, 4, 1]) + model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_base_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[12, 12], mlp_ratio=[4, 4, 1]) + model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_9_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1]) + model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **dict(model_args, 
**kwargs)) + return model + + +@register_model +def crossvit_15_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1]) + model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_18_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_9_dagger_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True) + model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_15_dagger_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True) + model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_15_dagger_408(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True) + model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_18_dagger_240(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True) + model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def crossvit_18_dagger_408(pretrained=False, **kwargs) -> CrossVit: + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True) + model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/cspnet.py b/pytorch-image-models/timm/models/cspnet.py new file mode 100644 index 0000000000000000000000000000000000000000..81d11a06546d95af547e3e45631219c9d6d130f9 --- /dev/null +++ b/pytorch-image-models/timm/models/cspnet.py @@ -0,0 +1,1114 @@ +"""PyTorch CspNet + +A PyTorch implementation of Cross Stage Partial Networks including: +* CSPResNet50 +* CSPResNeXt50 +* CSPDarkNet53 +* and DarkNet53 for good measure + +Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + +Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks + +Hacked together by / 
Copyright 2020 Ross Wightman +""" +from dataclasses import dataclass, asdict, replace +from functools import partial +from typing import Any, Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import ClassifierHead, ConvNormAct, DropPath, get_attn, create_act_layer, make_divisible +from ._builder import build_model_with_cfg +from ._manipulate import named_apply, MATCH_PREV_GROUP +from ._registry import register_model, generate_default_cfgs + +__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this + + +@dataclass +class CspStemCfg: + out_chs: Union[int, Tuple[int, ...]] = 32 + stride: Union[int, Tuple[int, ...]] = 2 + kernel_size: int = 3 + padding: Union[int, str] = '' + pool: Optional[str] = '' + + +def _pad_arg(x, n): + # pads an argument tuple to specified n by padding with last value + if not isinstance(x, (tuple, list)): + x = (x,) + curr_n = len(x) + pad_n = n - curr_n + if pad_n <= 0: + return x[:n] + return tuple(x + (x[-1],) * pad_n) + + +@dataclass +class CspStagesCfg: + depth: Tuple[int, ...] = (3, 3, 5, 2) # block depth (number of block repeats in stages) + out_chs: Tuple[int, ...] = (128, 256, 512, 1024) # number of output channels for blocks in stage + stride: Union[int, Tuple[int, ...]] = 2 # stride of stage + groups: Union[int, Tuple[int, ...]] = 1 # num kxk conv groups + block_ratio: Union[float, Tuple[float, ...]] = 1.0 + bottle_ratio: Union[float, Tuple[float, ...]] = 1. # bottleneck-ratio of blocks in stage + avg_down: Union[bool, Tuple[bool, ...]] = False + attn_layer: Optional[Union[str, Tuple[str, ...]]] = None + attn_kwargs: Optional[Union[Dict, Tuple[Dict]]] = None + stage_type: Union[str, Tuple[str]] = 'csp' # stage type ('csp', 'cs2', 'dark') + block_type: Union[str, Tuple[str]] = 'bottle' # blocks type for stages ('bottle', 'dark') + + # cross-stage only + expand_ratio: Union[float, Tuple[float, ...]] = 1.0 + cross_linear: Union[bool, Tuple[bool, ...]] = False + down_growth: Union[bool, Tuple[bool, ...]] = False + + def __post_init__(self): + n = len(self.depth) + assert len(self.out_chs) == n + self.stride = _pad_arg(self.stride, n) + self.groups = _pad_arg(self.groups, n) + self.block_ratio = _pad_arg(self.block_ratio, n) + self.bottle_ratio = _pad_arg(self.bottle_ratio, n) + self.avg_down = _pad_arg(self.avg_down, n) + self.attn_layer = _pad_arg(self.attn_layer, n) + self.attn_kwargs = _pad_arg(self.attn_kwargs, n) + self.stage_type = _pad_arg(self.stage_type, n) + self.block_type = _pad_arg(self.block_type, n) + + self.expand_ratio = _pad_arg(self.expand_ratio, n) + self.cross_linear = _pad_arg(self.cross_linear, n) + self.down_growth = _pad_arg(self.down_growth, n) + + +@dataclass +class CspModelCfg: + stem: CspStemCfg + stages: CspStagesCfg + zero_init_last: bool = True # zero init last weight (usually bn) in residual path + act_layer: str = 'leaky_relu' + norm_layer: str = 'batchnorm' + aa_layer: Optional[str] = None # FIXME support string factory for this + + +def _cs3_cfg( + width_multiplier=1.0, + depth_multiplier=1.0, + avg_down=False, + act_layer='silu', + focus=False, + attn_layer=None, + attn_kwargs=None, + bottle_ratio=1.0, + block_type='dark', +): + if focus: + stem_cfg = CspStemCfg( + out_chs=make_divisible(64 * width_multiplier), + kernel_size=6, stride=2, padding=2, pool='') + else: + stem_cfg = CspStemCfg( + out_chs=tuple([make_divisible(c * width_multiplier) for c in (32, 64)]), + kernel_size=3, stride=2, pool='') + 
return CspModelCfg( + stem=stem_cfg, + stages=CspStagesCfg( + out_chs=tuple([make_divisible(c * width_multiplier) for c in (128, 256, 512, 1024)]), + depth=tuple([int(d * depth_multiplier) for d in (3, 6, 9, 3)]), + stride=2, + bottle_ratio=bottle_ratio, + block_ratio=0.5, + avg_down=avg_down, + attn_layer=attn_layer, + attn_kwargs=attn_kwargs, + stage_type='cs3', + block_type=block_type, + ), + act_layer=act_layer, + ) + + +class BottleneckBlock(nn.Module): + """ ResNe(X)t Bottleneck Block + """ + + def __init__( + self, + in_chs, + out_chs, + dilation=1, + bottle_ratio=0.25, + groups=1, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_last=False, + attn_layer=None, + drop_block=None, + drop_path=0. + ): + super(BottleneckBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + attn_last = attn_layer is not None and attn_last + attn_first = attn_layer is not None and not attn_last + + self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvNormAct( + mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, + drop_layer=drop_block, **ckwargs) + self.attn2 = attn_layer(mid_chs, act_layer=act_layer) if attn_first else nn.Identity() + self.conv3 = ConvNormAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) + self.attn3 = attn_layer(out_chs, act_layer=act_layer) if attn_last else nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() + self.act3 = create_act_layer(act_layer) + + def zero_init_last(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.attn2(x) + x = self.conv3(x) + x = self.attn3(x) + x = self.drop_path(x) + shortcut + # FIXME partial shortcut needed if first block handled as per original, not used for my current impl + #x[:, :shortcut.size(1)] += shortcut + x = self.act3(x) + return x + + +class DarkBlock(nn.Module): + """ DarkNet Block + """ + + def __init__( + self, + in_chs, + out_chs, + dilation=1, + bottle_ratio=0.5, + groups=1, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_layer=None, + drop_block=None, + drop_path=0. + ): + super(DarkBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + + self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() + self.conv2 = ConvNormAct( + mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, + drop_layer=drop_block, **ckwargs) + self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.attn(x) + x = self.conv2(x) + x = self.drop_path(x) + shortcut + return x + + +class EdgeBlock(nn.Module): + """ EdgeResidual / Fused-MBConv / MobileNetV1-like 3x3 + 1x1 block (w/ activated output) + """ + + def __init__( + self, + in_chs, + out_chs, + dilation=1, + bottle_ratio=0.5, + groups=1, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_layer=None, + drop_block=None, + drop_path=0. 
+ ): + super(EdgeBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + + self.conv1 = ConvNormAct( + in_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, + drop_layer=drop_block, **ckwargs) + self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() + self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=1, **ckwargs) + self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.attn(x) + x = self.conv2(x) + x = self.drop_path(x) + shortcut + return x + + +class CrossStage(nn.Module): + """Cross Stage.""" + def __init__( + self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + expand_ratio=1., + groups=1, + first_dilation=None, + avg_down=False, + down_growth=False, + cross_linear=False, + block_dpr=None, + block_fn=BottleneckBlock, + **block_kwargs, + ): + super(CrossStage, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + aa_layer = block_kwargs.pop('aa_layer', None) + + if stride != 1 or first_dilation != dilation: + if avg_down: + self.conv_down = nn.Sequential( + nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling + ConvNormAct(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) + ) + else: + self.conv_down = ConvNormAct( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=aa_layer, **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = nn.Identity() + prev_chs = in_chs + + # FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. Also, + # there is also special case for the first stage for some of the model that results in uneven split + # across the two paths. I did it this way for simplicity for now. + self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # output of conv_exp is always split in two + + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_module(str(i), block_fn( + in_chs=prev_chs, + out_chs=block_out_chs, + dilation=dilation, + bottle_ratio=bottle_ratio, + groups=groups, + drop_path=block_dpr[i] if block_dpr is not None else 0., + **block_kwargs, + )) + prev_chs = block_out_chs + + # transition convs + self.conv_transition_b = ConvNormAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) + self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + x = self.conv_down(x) + x = self.conv_exp(x) + xs, xb = x.split(self.expand_chs // 2, dim=1) + xb = self.blocks(xb) + xb = self.conv_transition_b(xb).contiguous() + out = self.conv_transition(torch.cat([xs, xb], dim=1)) + return out + + +class CrossStage3(nn.Module): + """Cross Stage 3. + Similar to CrossStage, but with only one transition conv for the output. 
+ """ + def __init__( + self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + expand_ratio=1., + groups=1, + first_dilation=None, + avg_down=False, + down_growth=False, + cross_linear=False, + block_dpr=None, + block_fn=BottleneckBlock, + **block_kwargs, + ): + super(CrossStage3, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + aa_layer = block_kwargs.pop('aa_layer', None) + + if stride != 1 or first_dilation != dilation: + if avg_down: + self.conv_down = nn.Sequential( + nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling + ConvNormAct(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) + ) + else: + self.conv_down = ConvNormAct( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=aa_layer, **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = None + prev_chs = in_chs + + # expansion conv + self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # expanded output is split in 2 for blocks and cross stage + + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_module(str(i), block_fn( + in_chs=prev_chs, + out_chs=block_out_chs, + dilation=dilation, + bottle_ratio=bottle_ratio, + groups=groups, + drop_path=block_dpr[i] if block_dpr is not None else 0., + **block_kwargs, + )) + prev_chs = block_out_chs + + # transition convs + self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + x = self.conv_down(x) + x = self.conv_exp(x) + x1, x2 = x.split(self.expand_chs // 2, dim=1) + x1 = self.blocks(x1) + out = self.conv_transition(torch.cat([x1, x2], dim=1)) + return out + + +class DarkStage(nn.Module): + """DarkNet stage.""" + + def __init__( + self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + groups=1, + first_dilation=None, + avg_down=False, + block_fn=BottleneckBlock, + block_dpr=None, + **block_kwargs, + ): + super(DarkStage, self).__init__() + first_dilation = first_dilation or dilation + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + aa_layer = block_kwargs.pop('aa_layer', None) + + if avg_down: + self.conv_down = nn.Sequential( + nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling + ConvNormAct(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) + ) + else: + self.conv_down = ConvNormAct( + in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=aa_layer, **conv_kwargs) + + prev_chs = out_chs + block_out_chs = int(round(out_chs * block_ratio)) + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_module(str(i), block_fn( + in_chs=prev_chs, + out_chs=block_out_chs, + dilation=dilation, + bottle_ratio=bottle_ratio, + groups=groups, + drop_path=block_dpr[i] if block_dpr is not None else 0., + **block_kwargs + )) + prev_chs = block_out_chs + + def forward(self, x): + x = self.conv_down(x) + x = self.blocks(x) + return x + + +def create_csp_stem( + in_chans=3, + 
out_chs=32, + kernel_size=3, + stride=2, + pool='', + padding='', + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + aa_layer=None, +): + stem = nn.Sequential() + feature_info = [] + if not isinstance(out_chs, (tuple, list)): + out_chs = [out_chs] + stem_depth = len(out_chs) + assert stem_depth + assert stride in (1, 2, 4) + prev_feat = None + prev_chs = in_chans + last_idx = stem_depth - 1 + stem_stride = 1 + for i, chs in enumerate(out_chs): + conv_name = f'conv{i + 1}' + conv_stride = 2 if (i == 0 and stride > 1) or (i == last_idx and stride > 2 and not pool) else 1 + if conv_stride > 1 and prev_feat is not None: + feature_info.append(prev_feat) + stem.add_module(conv_name, ConvNormAct( + prev_chs, chs, kernel_size, + stride=conv_stride, + padding=padding if i == 0 else '', + act_layer=act_layer, + norm_layer=norm_layer, + )) + stem_stride *= conv_stride + prev_chs = chs + prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', conv_name])) + if pool: + assert stride > 2 + if prev_feat is not None: + feature_info.append(prev_feat) + if aa_layer is not None: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + stem.add_module('aa', aa_layer(channels=prev_chs, stride=2)) + pool_name = 'aa' + else: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + pool_name = 'pool' + stem_stride *= 2 + prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', pool_name])) + feature_info.append(prev_feat) + return stem, feature_info + + +def _get_stage_fn(stage_args): + stage_type = stage_args.pop('stage_type') + assert stage_type in ('dark', 'csp', 'cs3') + if stage_type == 'dark': + stage_args.pop('expand_ratio', None) + stage_args.pop('cross_linear', None) + stage_args.pop('down_growth', None) + stage_fn = DarkStage + elif stage_type == 'csp': + stage_fn = CrossStage + else: + stage_fn = CrossStage3 + return stage_fn, stage_args + + +def _get_block_fn(stage_args): + block_type = stage_args.pop('block_type') + assert block_type in ('dark', 'edge', 'bottle') + if block_type == 'dark': + return DarkBlock, stage_args + elif block_type == 'edge': + return EdgeBlock, stage_args + else: + return BottleneckBlock, stage_args + + +def _get_attn_fn(stage_args): + attn_layer = stage_args.pop('attn_layer') + attn_kwargs = stage_args.pop('attn_kwargs', None) or {} + if attn_layer is not None: + attn_layer = get_attn(attn_layer) + if attn_kwargs: + attn_layer = partial(attn_layer, **attn_kwargs) + return attn_layer, stage_args + + +def create_csp_stages( + cfg: CspModelCfg, + drop_path_rate: float, + output_stride: int, + stem_feat: Dict[str, Any], +): + cfg_dict = asdict(cfg.stages) + num_stages = len(cfg.stages.depth) + cfg_dict['block_dpr'] = [None] * num_stages if not drop_path_rate else \ + [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.stages.depth)).split(cfg.stages.depth)] + stage_args = [dict(zip(cfg_dict.keys(), values)) for values in zip(*cfg_dict.values())] + block_kwargs = dict( + act_layer=cfg.act_layer, + norm_layer=cfg.norm_layer, + ) + + dilation = 1 + net_stride = stem_feat['reduction'] + prev_chs = stem_feat['num_chs'] + prev_feat = stem_feat + feature_info = [] + stages = [] + for stage_idx, stage_args in enumerate(stage_args): + stage_fn, stage_args = _get_stage_fn(stage_args) + block_fn, stage_args = _get_block_fn(stage_args) + attn_fn, stage_args = _get_attn_fn(stage_args) + stride = stage_args.pop('stride') + if stride != 1 and prev_feat: + feature_info.append(prev_feat) + 
if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + stages += [stage_fn( + prev_chs, + **stage_args, + stride=stride, + first_dilation=first_dilation, + dilation=dilation, + block_fn=block_fn, + aa_layer=cfg.aa_layer, + attn_layer=attn_fn, # will be passed through stage as block_kwargs + **block_kwargs, + )] + prev_chs = stage_args['out_chs'] + prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') + + feature_info.append(prev_feat) + return nn.Sequential(*stages), feature_info + + +class CspNet(nn.Module): + """Cross Stage Partial base model. + + Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + + NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the + darknet impl. I did it this way for simplicity and less special cases. + """ + + def __init__( + self, + cfg: CspModelCfg, + in_chans=3, + num_classes=1000, + output_stride=32, + global_pool='avg', + drop_rate=0., + drop_path_rate=0., + zero_init_last=True, + **kwargs, + ): + """ + Args: + cfg (CspModelCfg): Model architecture configuration + in_chans (int): Number of input channels (default: 3) + num_classes (int): Number of classifier classes (default: 1000) + output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) + global_pool (str): Global pooling type (default: 'avg') + drop_rate (float): Dropout rate (default: 0.) + drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) + zero_init_last (bool): Zero-init last weight of residual path + kwargs (dict): Extra kwargs overlayed onto cfg + """ + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg + layer_args = dict( + act_layer=cfg.act_layer, + norm_layer=cfg.norm_layer, + aa_layer=cfg.aa_layer + ) + self.feature_info = [] + + # Construct the stem + self.stem, stem_feat_info = create_csp_stem(in_chans, **asdict(cfg.stem), **layer_args) + self.feature_info.extend(stem_feat_info[:-1]) + + # Construct the stages + self.stages, stage_feat_info = create_csp_stages( + cfg, + drop_path_rate=drop_path_rate, + output_stride=output_stride, + stem_feat=stem_feat_info[-1], + ) + prev_chs = stage_feat_info[-1]['num_chs'] + self.feature_info.extend(stage_feat_info) + + # Construct the head + self.num_features = self.head_hidden_size = prev_chs + self.head = ClassifierHead( + in_features=prev_chs, + num_classes=num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^stages\.(\d+)\..*transition', MATCH_PREV_GROUP), # map to last block in stage + (r'^stages\.(\d+)', (0,)), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + 
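A minimal usage sketch for the CspNet variants defined in this file (assuming a timm install where this cspnet.py is registered; illustrative only, not part of the patch):

import torch
import timm

# features_only works here because _create_cspnet passes a feature_cfg to build_model_with_cfg,
# so the returned model yields one feature map per entry recorded in feature_info.
model = timm.create_model('cspresnet50', pretrained=False, features_only=True)
feats = model(torch.randn(1, 3, 256, 256))
print([f.shape for f in feats])  # one (B, C, H, W) tensor per extracted stage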
+ def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name, zero_init_last=False): + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +model_cfgs = dict( + cspresnet50=CspModelCfg( + stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(128, 256, 512, 1024), + stride=(1, 2), + expand_ratio=2., + bottle_ratio=0.5, + cross_linear=True, + ), + ), + cspresnet50d=CspModelCfg( + stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(128, 256, 512, 1024), + stride=(1,) + (2,), + expand_ratio=2., + bottle_ratio=0.5, + block_ratio=1., + cross_linear=True, + ), + ), + cspresnet50w=CspModelCfg( + stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(256, 512, 1024, 2048), + stride=(1,) + (2,), + expand_ratio=1., + bottle_ratio=0.25, + block_ratio=0.5, + cross_linear=True, + ), + ), + cspresnext50=CspModelCfg( + stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(256, 512, 1024, 2048), + stride=(1,) + (2,), + groups=32, + expand_ratio=1., + bottle_ratio=1., + block_ratio=0.5, + cross_linear=True, + ), + ), + cspdarknet53=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 2, 8, 8, 4), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + expand_ratio=(2.,) + (1.,), + bottle_ratio=(0.5,) + (1.,), + block_ratio=(1.,) + (0.5,), + down_growth=True, + block_type='dark', + ), + ), + darknet17=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1,) * 5, + out_chs=(64, 128, 256, 512, 1024), + stride=(2,), + bottle_ratio=(0.5,), + block_ratio=(1.,), + stage_type='dark', + block_type='dark', + ), + ), + darknet21=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 1, 1, 2, 2), + out_chs=(64, 128, 256, 512, 1024), + stride=(2,), + bottle_ratio=(0.5,), + block_ratio=(1.,), + stage_type='dark', + block_type='dark', + + ), + ), + sedarknet21=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 1, 1, 2, 2), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + bottle_ratio=0.5, + block_ratio=1., + attn_layer='se', + stage_type='dark', + block_type='dark', + + ), + ), + darknet53=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 2, 8, 8, 4), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + bottle_ratio=0.5, + block_ratio=1., + stage_type='dark', + block_type='dark', + ), + ), + darknetaa53=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + 
depth=(1, 2, 8, 8, 4), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + bottle_ratio=0.5, + block_ratio=1., + avg_down=True, + stage_type='dark', + block_type='dark', + ), + ), + + cs3darknet_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5), + cs3darknet_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67), + cs3darknet_l=_cs3_cfg(), + cs3darknet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33), + + cs3darknet_focus_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5, focus=True), + cs3darknet_focus_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67, focus=True), + cs3darknet_focus_l=_cs3_cfg(focus=True), + cs3darknet_focus_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, focus=True), + + cs3sedarknet_l=_cs3_cfg(attn_layer='se', attn_kwargs=dict(rd_ratio=.25)), + cs3sedarknet_x=_cs3_cfg(attn_layer='se', width_multiplier=1.25, depth_multiplier=1.33), + + cs3sedarknet_xdw=CspModelCfg( + stem=CspStemCfg(out_chs=(32, 64), kernel_size=3, stride=2, pool=''), + stages=CspStagesCfg( + depth=(3, 6, 12, 4), + out_chs=(256, 512, 1024, 2048), + stride=2, + groups=(1, 1, 256, 512), + bottle_ratio=0.5, + block_ratio=0.5, + attn_layer='se', + ), + act_layer='silu', + ), + + cs3edgenet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge'), + cs3se_edgenet_x=_cs3_cfg( + width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge', + attn_layer='se', attn_kwargs=dict(rd_ratio=.25)), +) + + +def _create_cspnet(variant, pretrained=False, **kwargs): + if variant.startswith('darknet') or variant.startswith('cspdarknet'): + # NOTE: DarkNet is one of few models with stride==1 features w/ 6 out_indices [0..5] + default_out_indices = (0, 1, 2, 3, 4, 5) + else: + default_out_indices = (0, 1, 2, 3, 4) + out_indices = kwargs.pop('out_indices', default_out_indices) + return build_model_with_cfg( + CspNet, variant, pretrained, + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.887, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'cspresnet50.ra_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), + 'cspresnet50d.untrained': _cfg(), + 'cspresnet50w.untrained': _cfg(), + 'cspresnext50.ra_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth', + ), + 'cspdarknet53.ra_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), + + 'darknet17.untrained': _cfg(), + 'darknet21.untrained': _cfg(), + 'sedarknet21.untrained': _cfg(), + 'darknet53.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknet53_256_c2ns-3aeff817.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'darknetaa53.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknetaa53_c2ns-5c28ec8a.pth', + 
test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'cs3darknet_s.untrained': _cfg(interpolation='bicubic'), + 'cs3darknet_m.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_m_c2ns-43f06604.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95, + ), + 'cs3darknet_l.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_l_c2ns-16220c5d.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3darknet_x.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_x_c2ns-4e4490aa.pth', + interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'cs3darknet_focus_s.ra4_e3600_r256_in1k': _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', test_input_size=(3, 320, 320), test_crop_pct=1.0), + 'cs3darknet_focus_m.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_m_c2ns-e23bed41.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3darknet_focus_l.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_l_c2ns-65ef8888.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3darknet_focus_x.untrained': _cfg(interpolation='bicubic'), + + 'cs3sedarknet_l.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_l_c2ns-e8d1dc13.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3sedarknet_x.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_x_c2ns-b4d0abc0.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'cs3sedarknet_xdw.untrained': _cfg(interpolation='bicubic'), + + 'cs3edgenet_x.c2_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3edgenet_x_c2-2e1610a9.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'cs3se_edgenet_x.c2ns_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3se_edgenet_x_c2ns-76f8e3ac.pth', + interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), +}) + + +@register_model +def cspresnet50(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50d(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50w(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) + + +@register_model +def cspdarknet53(pretrained=False, **kwargs) -> CspNet: + return 
_create_cspnet('cspdarknet53', pretrained=pretrained, **kwargs) + + +@register_model +def darknet17(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('darknet17', pretrained=pretrained, **kwargs) + + +@register_model +def darknet21(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('darknet21', pretrained=pretrained, **kwargs) + + +@register_model +def sedarknet21(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('sedarknet21', pretrained=pretrained, **kwargs) + + +@register_model +def darknet53(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('darknet53', pretrained=pretrained, **kwargs) + + +@register_model +def darknetaa53(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('darknetaa53', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_s(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_s', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_m(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_m', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_l(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_l', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_x(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_s(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_focus_s', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_m(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_focus_m', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_l(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_focus_l', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_x(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3darknet_focus_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3sedarknet_l(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3sedarknet_l', pretrained=pretrained, **kwargs) + + +@register_model +def cs3sedarknet_x(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3sedarknet_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3sedarknet_xdw(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3sedarknet_xdw', pretrained=pretrained, **kwargs) + + +@register_model +def cs3edgenet_x(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3edgenet_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3se_edgenet_x(pretrained=False, **kwargs) -> CspNet: + return _create_cspnet('cs3se_edgenet_x', pretrained=pretrained, **kwargs) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/davit.py b/pytorch-image-models/timm/models/davit.py new file mode 100644 index 0000000000000000000000000000000000000000..b58bbbbfbe15b3fc9f5be02e5191c0067a925130 --- /dev/null +++ b/pytorch-image-models/timm/models/davit.py @@ -0,0 +1,816 @@ +""" DaViT: Dual Attention Vision Transformers + +As described in https://arxiv.org/abs/2204.03645 + +Input size invariant transformer architecture that combines channel and spacial +attention in each block. The attention mechanisms used are linear in complexity. 
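To make the "linear in complexity" claim concrete, a small shape sketch of the channel attention defined later in this file (a minimal sketch with arbitrary tensor sizes, not part of the patch): the attention matrix is head_dim x head_dim, so its size is independent of the token count N.

import torch

B, N, C, num_heads = 2, 196, 64, 8
head_dim = C // num_heads
q, k, v = (torch.randn(B, num_heads, N, head_dim) for _ in range(3))

k = k * head_dim ** -0.5
attn = (k.transpose(-1, -2) @ v).softmax(dim=-1)      # (B, heads, head_dim, head_dim): no N dimension
out = (attn @ q.transpose(-1, -2)).transpose(-1, -2)  # (B, heads, N, head_dim)
print(attn.shape, out.shape)  # torch.Size([2, 8, 8, 8]) torch.Size([2, 8, 196, 8])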
+ +DaViT model defs and weights adapted from https://github.com/dingmyu/davit, original copyright below + +""" +# Copyright (c) 2022 Mingyu Ding +# All rights reserved. +# This source code is licensed under the MIT license +from functools import partial +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, to_2tuple, trunc_normal_, Mlp, LayerNorm2d, get_norm_layer, use_fused_attn +from timm.layers import NormMlpClassifierHead, ClassifierHead +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_function +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['DaVit'] + + +class ConvPosEnc(nn.Module): + def __init__(self, dim: int, k: int = 3, act: bool = False): + super(ConvPosEnc, self).__init__() + + self.proj = nn.Conv2d( + dim, + dim, + kernel_size=k, + stride=1, + padding=k // 2, + groups=dim, + ) + self.act = nn.GELU() if act else nn.Identity() + + def forward(self, x: Tensor): + feat = self.proj(x) + x = x + self.act(feat) + return x + + +class Stem(nn.Module): + """ Size-agnostic implementation of 2D image to patch embedding, + allowing input size to be adjusted during model forward operation + """ + + def __init__( + self, + in_chs=3, + out_chs=96, + stride=4, + norm_layer=LayerNorm2d, + ): + super().__init__() + stride = to_2tuple(stride) + self.stride = stride + self.in_chs = in_chs + self.out_chs = out_chs + assert stride[0] == 4 # only setup for stride==4 + self.conv = nn.Conv2d( + in_chs, + out_chs, + kernel_size=7, + stride=stride, + padding=3, + ) + self.norm = norm_layer(out_chs) + + def forward(self, x: Tensor): + B, C, H, W = x.shape + pad_r = (self.stride[1] - W % self.stride[1]) % self.stride[1] + pad_b = (self.stride[0] - H % self.stride[0]) % self.stride[0] + x = F.pad(x, (0, pad_r, 0, pad_b)) + x = self.conv(x) + x = self.norm(x) + return x + + +class Downsample(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size=3, + norm_layer=LayerNorm2d, + ): + super().__init__() + self.in_chs = in_chs + self.out_chs = out_chs + + self.norm = norm_layer(in_chs) + self.even_k = kernel_size % 2 == 0 + self.conv = nn.Conv2d( + in_chs, + out_chs, + kernel_size=kernel_size, + stride=2, + padding=0 if self.even_k else kernel_size // 2, + ) + + def forward(self, x: Tensor): + B, C, H, W = x.shape + x = self.norm(x) + if self.even_k: + k_h, k_w = self.conv.kernel_size + pad_r = (k_w - W % k_w) % k_w + pad_b = (k_h - H % k_h) % k_h + x = F.pad(x, (0, pad_r , 0, pad_b)) + x = self.conv(x) + return x + + +class ChannelAttentionV2(nn.Module): + + def __init__(self, dim, num_heads=8, qkv_bias=True, dynamic_scale=True): + super().__init__() + self.groups = num_heads + self.head_dim = dim // num_heads + self.dynamic_scale = dynamic_scale + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + def forward(self, x): + B, N, C = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.groups, C // self.groups).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + if self.dynamic_scale: + q = q * N ** -0.5 + else: + q = q * self.head_dim ** -0.5 + attn = q.transpose(-1, -2) @ k + attn = attn.softmax(dim=-1) + x = (attn @ v.transpose(-1, -2)).transpose(-1, -2) + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + return x + + + +class ChannelAttention(nn.Module): + + 
def __init__(self, dim, num_heads=8, qkv_bias=False): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + def forward(self, x: Tensor): + B, N, C = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + k = k * self.scale + attn = k.transpose(-1, -2) @ v + attn = attn.softmax(dim=-1) + x = (attn @ q.transpose(-1, -2)).transpose(-1, -2) + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + return x + + +class ChannelBlock(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ffn=True, + cpe_act=False, + v2=False, + ): + super().__init__() + + self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act) + self.ffn = ffn + self.norm1 = norm_layer(dim) + attn_layer = ChannelAttentionV2 if v2 else ChannelAttention + self.attn = attn_layer( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act) + + if self.ffn: + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + else: + self.norm2 = None + self.mlp = None + self.drop_path2 = None + + def forward(self, x: Tensor): + B, C, H, W = x.shape + + x = self.cpe1(x).flatten(2).transpose(1, 2) + + cur = self.norm1(x) + cur = self.attn(cur) + x = x + self.drop_path1(cur) + + x = self.cpe2(x.transpose(1, 2).view(B, C, H, W)) + + if self.mlp is not None: + x = x.flatten(2).transpose(1, 2) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + x = x.transpose(1, 2).view(B, C, H, W) + + return x + + +def window_partition(x: Tensor, window_size: Tuple[int, int]): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows: Tensor, window_size: Tuple[int, int], H: int, W: int): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + C = windows.shape[-1] + x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True + """ + fused_attn: torch.jit.Final[bool] + + def __init__(self, dim, window_size, num_heads, qkv_bias=True): + super().__init__() + self.dim = dim + self.window_size = window_size + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x: Tensor): + B_, N, C = x.shape + + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + if self.fused_attn: + x = F.scaled_dot_product_attention(q, k, v) + else: + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + attn = self.softmax(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + return x + + +class SpatialBlock(nn.Module): + r""" Windows Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__( + self, + dim, + num_heads, + window_size=7, + mlp_ratio=4., + qkv_bias=True, + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ffn=True, + cpe_act=False, + ): + super().__init__() + self.dim = dim + self.ffn = ffn + self.num_heads = num_heads + self.window_size = to_2tuple(window_size) + self.mlp_ratio = mlp_ratio + + self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act) + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + self.window_size, + num_heads=num_heads, + qkv_bias=qkv_bias, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act) + if self.ffn: + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + else: + self.norm2 = None + self.mlp = None + self.drop_path1 = None + + def forward(self, x: Tensor): + B, C, H, W = x.shape + + shortcut = self.cpe1(x).flatten(2).transpose(1, 2) + + x = self.norm1(shortcut) + x = x.view(B, H, W, C) + + pad_l = pad_t = 0 + pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] + pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + x_windows = window_partition(x, self.window_size) + x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) + x = window_reverse(attn_windows, self.window_size, Hp, Wp) + + # if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + x = shortcut + self.drop_path1(x) + + x = self.cpe2(x.transpose(1, 2).view(B, C, H, W)) + + if self.mlp is not None: + x = x.flatten(2).transpose(1, 2) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + x = x.transpose(1, 2).view(B, C, H, W) + + return x + + +class DaVitStage(nn.Module): + def __init__( + self, + in_chs, + out_chs, + depth=1, + downsample=True, + attn_types=('spatial', 'channel'), + num_heads=3, + window_size=7, + mlp_ratio=4., + qkv_bias=True, + drop_path_rates=(0, 0), + norm_layer=LayerNorm2d, + norm_layer_cl=nn.LayerNorm, + ffn=True, + cpe_act=False, + down_kernel_size=2, + named_blocks=False, + channel_attn_v2=False, + ): + super().__init__() + + self.grad_checkpointing = False + + # downsample embedding layer at the beginning of each stage + if downsample: + self.downsample = Downsample(in_chs, out_chs, kernel_size=down_kernel_size, norm_layer=norm_layer) + else: + self.downsample = nn.Identity() + + ''' + repeating alternating attention blocks in each stage + default: (spatial -> channel) x depth + + potential opportunity to integrate with a more general version of ByobNet/ByoaNet + since the logic is similar + ''' + stage_blocks = [] + for block_idx in range(depth): + from collections import OrderedDict + dual_attention_block = [] + for attn_idx, attn_type in enumerate(attn_types): + if attn_type == 'spatial': + dual_attention_block.append(('spatial_block', SpatialBlock( + dim=out_chs, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=drop_path_rates[block_idx], + norm_layer=norm_layer_cl, + ffn=ffn, + cpe_act=cpe_act, + window_size=window_size, + ))) + elif attn_type == 'channel': + dual_attention_block.append(('channel_block', ChannelBlock( + dim=out_chs, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=drop_path_rates[block_idx], + norm_layer=norm_layer_cl, + ffn=ffn, + cpe_act=cpe_act, + v2=channel_attn_v2, + ))) + if named_blocks: + stage_blocks.append(nn.Sequential(OrderedDict(dual_attention_block))) + else: + stage_blocks.append(nn.Sequential(*[b[1] for b in dual_attention_block])) + self.blocks = nn.Sequential(*stage_blocks) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + def forward(self, x: Tensor): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class DaVit(nn.Module): + r""" DaViT + A PyTorch implementation of `DaViT: Dual Attention Vision Transformers` - 
https://arxiv.org/abs/2204.03645 + Supports arbitrary input sizes and pyramid feature extraction + + Args: + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + depths (tuple(int)): Number of blocks in each stage. Default: (1, 1, 3, 1) + embed_dims (tuple(int)): Patch embedding dimension. Default: (96, 192, 384, 768) + num_heads (tuple(int)): Number of attention heads in different layers. Default: (3, 6, 12, 24) + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + """ + + def __init__( + self, + in_chans=3, + depths=(1, 1, 3, 1), + embed_dims=(96, 192, 384, 768), + num_heads=(3, 6, 12, 24), + window_size=7, + mlp_ratio=4, + qkv_bias=True, + norm_layer='layernorm2d', + norm_layer_cl='layernorm', + norm_eps=1e-5, + attn_types=('spatial', 'channel'), + ffn=True, + cpe_act=False, + down_kernel_size=2, + channel_attn_v2=False, + named_blocks=False, + drop_rate=0., + drop_path_rate=0., + num_classes=1000, + global_pool='avg', + head_norm_first=False, + ): + super().__init__() + num_stages = len(embed_dims) + assert num_stages == len(num_heads) == len(depths) + norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) + norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps) + self.num_classes = num_classes + self.num_features = self.head_hidden_size = embed_dims[-1] + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.feature_info = [] + + self.stem = Stem(in_chans, embed_dims[0], norm_layer=norm_layer) + in_chs = embed_dims[0] + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + stages = [] + for i in range(num_stages): + out_chs = embed_dims[i] + stage = DaVitStage( + in_chs, + out_chs, + depth=depths[i], + downsample=i > 0, + attn_types=attn_types, + num_heads=num_heads[i], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path_rates=dpr[i], + norm_layer=norm_layer, + norm_layer_cl=norm_layer_cl, + ffn=ffn, + cpe_act=cpe_act, + down_kernel_size=down_kernel_size, + channel_attn_v2=channel_attn_v2, + named_blocks=named_blocks, + ) + in_chs = out_chs + stages.append(stage) + self.feature_info += [dict(num_chs=out_chs, reduction=2**(i+2), module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets + # otherwise pool -> norm -> fc, the default DaViT order, similar to ConvNeXt + # FIXME generalize this structure to ClassifierHead + if head_norm_first: + self.norm_pre = norm_layer(self.num_features) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + ) + else: + self.norm_pre = nn.Identity() + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + norm_layer=norm_layer, + ) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', # stem and embed + 
blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+).downsample', (0,)), + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^norm_pre', (99999,)), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + for stage in self.stages: + stage.set_grad_checkpointing(enable=enable) + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + x = self.norm_pre(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _convert_florence2(state_dict, model, prefix='vision_tower.'): + import re + out_dict = {} + + for k, v in state_dict.items(): + if k.startswith(prefix): + k = k.replace(prefix, '') + else: + continue + k = re.sub(r'convs.([0-9]+)', r'stages.\1.downsample', k) + k = re.sub(r'blocks.([0-9]+)', r'stages.\1.blocks', k) + k = k.replace('downsample.proj', 'downsample.conv') + k = k.replace('stages.0.downsample', 'stem') + #k = k.replace('head.', 'head.fc.') + #k = k.replace('norms.', 'head.norm.') + k = k.replace('window_attn.norm.', 'norm1.') + k = k.replace('window_attn.fn.', 'attn.') + k = k.replace('channel_attn.norm.', 'norm1.') + k = k.replace('channel_attn.fn.', 'attn.') + k = k.replace('ffn.norm.', 'norm2.') + k = k.replace('ffn.fn.net.', 'mlp.') + k = k.replace('conv1.fn.dw', 'cpe1.proj') + k = k.replace('conv2.fn.dw', 'cpe2.proj') + out_dict[k] = v + + return out_dict + + +def checkpoint_filter_fn(state_dict, model): + """ Remap MSFT checkpoints -> timm """ + if 'head.fc.weight' in state_dict: + return state_dict # non-MSFT checkpoint + + if 'state_dict' in state_dict: + state_dict = state_dict['state_dict'] + + if 'vision_tower.convs.0.proj.weight' in state_dict: + return _convert_florence2(state_dict, model) + + import re + out_dict = {} + for k, v in state_dict.items(): + k = re.sub(r'patch_embeds.([0-9]+)', r'stages.\1.downsample', k) + k = re.sub(r'main_blocks.([0-9]+)', r'stages.\1.blocks', k) + k = k.replace('downsample.proj', 'downsample.conv') + k = k.replace('stages.0.downsample', 'stem') + k = k.replace('head.', 'head.fc.') + k = k.replace('norms.', 'head.norm.') + k = k.replace('cpe.0', 'cpe1') + k = k.replace('cpe.1', 'cpe2') + out_dict[k] = v + return out_dict + + +def _create_davit(variant, pretrained=False, **kwargs): + default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) + out_indices = kwargs.pop('out_indices', default_out_indices) + + strict = kwargs.pop('pretrained_strict', True) + if variant.endswith('_fl'): + # FIXME cleaner approach to missing head norm? 
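+        # The Florence-2 ('_fl') checkpoints are published as headless image towers
+        # (their cfgs below use num_classes=0), so the classifier / head-norm weights
+        # are absent and strict state_dict loading would fail on the missing keys.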
+ strict = False + + model = build_model_with_cfg( + DaVit, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + pretrained_strict=strict, + **kwargs) + + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +# TODO contact authors to get larger pretrained models +default_cfgs = generate_default_cfgs({ + # official microsoft weights from https://github.com/dingmyu/davit + 'davit_tiny.msft_in1k': _cfg( + hf_hub_id='timm/'), + 'davit_small.msft_in1k': _cfg( + hf_hub_id='timm/'), + 'davit_base.msft_in1k': _cfg( + hf_hub_id='timm/'), + 'davit_large': _cfg(), + 'davit_huge': _cfg(), + 'davit_giant': _cfg(), + 'davit_base_fl.msft_florence2': _cfg( + hf_hub_id='microsoft/Florence-2-base', + num_classes=0, input_size=(3, 768, 768)), + 'davit_huge_fl.msft_florence2': _cfg( + hf_hub_id='microsoft/Florence-2-large', + num_classes=0, input_size=(3, 768, 768)), +}) + + +@register_model +def davit_tiny(pretrained=False, **kwargs) -> DaVit: + model_args = dict(depths=(1, 1, 3, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24)) + return _create_davit('davit_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def davit_small(pretrained=False, **kwargs) -> DaVit: + model_args = dict(depths=(1, 1, 9, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24)) + return _create_davit('davit_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def davit_base(pretrained=False, **kwargs) -> DaVit: + model_args = dict(depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32)) + return _create_davit('davit_base', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def davit_large(pretrained=False, **kwargs) -> DaVit: + model_args = dict(depths=(1, 1, 9, 1), embed_dims=(192, 384, 768, 1536), num_heads=(6, 12, 24, 48)) + return _create_davit('davit_large', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def davit_huge(pretrained=False, **kwargs) -> DaVit: + model_args = dict(depths=(1, 1, 9, 1), embed_dims=(256, 512, 1024, 2048), num_heads=(8, 16, 32, 64)) + return _create_davit('davit_huge', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def davit_giant(pretrained=False, **kwargs) -> DaVit: + model_args = dict(depths=(1, 1, 12, 3), embed_dims=(384, 768, 1536, 3072), num_heads=(12, 24, 48, 96)) + return _create_davit('davit_giant', pretrained=pretrained, **dict(model_args, **kwargs)) + + + +@register_model +def davit_base_fl(pretrained=False, **kwargs) -> DaVit: + model_args = dict( + depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32), + window_size=12, down_kernel_size=3, channel_attn_v2=True, named_blocks=True, + ) + return _create_davit('davit_base_fl', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def davit_huge_fl(pretrained=False, **kwargs) -> DaVit: + # NOTE: huge image tower used in 'large' Florence2 model + model_args = dict( + depths=(1, 1, 9, 1), embed_dims=(256, 512, 1024, 2048), num_heads=(8, 16, 32, 64), + window_size=12, down_kernel_size=3, channel_attn_v2=True, named_blocks=True, + ) + return 
_create_davit('davit_huge_fl', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/deit.py b/pytorch-image-models/timm/models/deit.py new file mode 100644 index 0000000000000000000000000000000000000000..0072013bf63ce7d529672f8d6678571f196eddf3 --- /dev/null +++ b/pytorch-image-models/timm/models/deit.py @@ -0,0 +1,417 @@ +""" DeiT - Data-efficient Image Transformers + +DeiT model defs and weights from https://github.com/facebookresearch/deit, original copyright below + +paper: `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 + +paper: `DeiT III: Revenge of the ViT` - https://arxiv.org/abs/2204.07118 + +Modifications copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +from functools import partial +from typing import Optional + +import torch +from torch import nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import resample_abs_pos_embed +from timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn +from ._builder import build_model_with_cfg +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['VisionTransformerDistilled'] # model_registry will add each entrypoint fn to this + + +class VisionTransformerDistilled(VisionTransformer): + """ Vision Transformer w/ Distillation Token and Head + + Distillation token & head support for `DeiT: Data-efficient Image Transformers` + - https://arxiv.org/abs/2012.12877 + """ + + def __init__(self, *args, **kwargs): + weight_init = kwargs.pop('weight_init', '') + super().__init__(*args, **kwargs, weight_init='skip') + assert self.global_pool in ('token',) + + self.num_prefix_tokens = 2 + self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + self.pos_embed = nn.Parameter( + torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim)) + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity() + self.distilled_training = False # must set this True to train w/ distillation token + + self.init_weights(weight_init) + + def init_weights(self, mode=''): + trunc_normal_(self.dist_token, std=.02) + super().init_weights(mode=mode) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed|dist_token', + blocks=[ + (r'^blocks\.(\d+)', None), + (r'^norm', (99999,))] # final norm w/ last block + ) + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head, self.head_dist + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + def _pos_embed(self, x): + if self.dynamic_img_size: + B, H, W, C = x.shape + prev_grid_size = self.patch_embed.grid_size + pos_embed = resample_abs_pos_embed( + self.pos_embed, + new_size=(H, W), + old_size=prev_grid_size, + num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens, + ) + x = x.view(B, -1, C) + else: + pos_embed = self.pos_embed + if self.no_embed_class: + # deit-3, updated JAX (big vision) + # position embedding does not overlap 
with class token, add then concat + x = x + pos_embed + x = torch.cat(( + self.cls_token.expand(x.shape[0], -1, -1), + self.dist_token.expand(x.shape[0], -1, -1), + x), + dim=1) + else: + # original timm, JAX, and deit vit impl + # pos_embed has entry for class token, concat then add + x = torch.cat(( + self.cls_token.expand(x.shape[0], -1, -1), + self.dist_token.expand(x.shape[0], -1, -1), + x), + dim=1) + x = x + pos_embed + return self.pos_drop(x) + + def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: + x, x_dist = x[:, 0], x[:, 1] + if pre_logits: + return (x + x_dist) / 2 + x = self.head(x) + x_dist = self.head_dist(x_dist) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train / finetune, inference average the classifier predictions + return (x + x_dist) / 2 + + +def _create_deit(variant, pretrained=False, distilled=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + model_cls = VisionTransformerDistilled if distilled else VisionTransformer + model = build_model_with_cfg( + model_cls, + variant, + pretrained, + pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True), + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # deit models (FB weights) + 'deit_tiny_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'), + 'deit_small_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'), + 'deit_base_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'), + 'deit_base_patch16_384.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'deit_tiny_distilled_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', + classifier=('head', 'head_dist')), + 'deit_small_distilled_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', + classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', + classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_384.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', + input_size=(3, 384, 384), crop_pct=1.0, + classifier=('head', 'head_dist')), + + 'deit3_small_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'), + 'deit3_small_patch16_384.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth', + input_size=(3, 
384, 384), crop_pct=1.0), + 'deit3_medium_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'), + 'deit3_base_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'), + 'deit3_base_patch16_384.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_large_patch16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'), + 'deit3_large_patch16_384.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_huge_patch14_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'), + + 'deit3_small_patch16_224.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth', + crop_pct=1.0), + 'deit3_small_patch16_384.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_medium_patch16_224.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth', + crop_pct=1.0), + 'deit3_base_patch16_224.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth', + crop_pct=1.0), + 'deit3_base_patch16_384.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_large_patch16_224.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth', + crop_pct=1.0), + 'deit3_large_patch16_384.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_huge_patch14_224.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth', + crop_pct=1.0), +}) + + +@register_model +def deit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) + model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) + model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. 
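+
+    Example (a minimal usage sketch, assuming only that `timm` and `torch` are
+    installed; not part of the original DeiT sources):
+
+        >>> import timm, torch
+        >>> model = timm.create_model('deit_base_patch16_224', pretrained=False)
+        >>> model(torch.randn(1, 3, 224, 224)).shape
+        torch.Size([1, 1000])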
+ """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: + """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) + model = _create_deit( + 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit_small_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: + """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) + model = _create_deit( + 'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit_base_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: + """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_deit( + 'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit_base_distilled_patch16_384(pretrained=False, **kwargs) -> VisionTransformerDistilled: + """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_deit( + 'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). + ImageNet-1k weights from https://github.com/facebookresearch/deit. 
+ """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 medium model @ 224x224 (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def deit3_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: + """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). + ImageNet-1k weights from https://github.com/facebookresearch/deit. 
+ """ + model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6) + model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +register_model_deprecations(__name__, { + 'deit3_small_patch16_224_in21ft1k': 'deit3_small_patch16_224.fb_in22k_ft_in1k', + 'deit3_small_patch16_384_in21ft1k': 'deit3_small_patch16_384.fb_in22k_ft_in1k', + 'deit3_medium_patch16_224_in21ft1k': 'deit3_medium_patch16_224.fb_in22k_ft_in1k', + 'deit3_base_patch16_224_in21ft1k': 'deit3_base_patch16_224.fb_in22k_ft_in1k', + 'deit3_base_patch16_384_in21ft1k': 'deit3_base_patch16_384.fb_in22k_ft_in1k', + 'deit3_large_patch16_224_in21ft1k': 'deit3_large_patch16_224.fb_in22k_ft_in1k', + 'deit3_large_patch16_384_in21ft1k': 'deit3_large_patch16_384.fb_in22k_ft_in1k', + 'deit3_huge_patch14_224_in21ft1k': 'deit3_huge_patch14_224.fb_in22k_ft_in1k' +}) diff --git a/pytorch-image-models/timm/models/densenet.py b/pytorch-image-models/timm/models/densenet.py new file mode 100644 index 0000000000000000000000000000000000000000..31d1f73f9ccfc8d9cf839544791417adccd9fe8f --- /dev/null +++ b/pytorch-image-models/timm/models/densenet.py @@ -0,0 +1,424 @@ +"""Pytorch Densenet implementation w/ tweaks +This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with +fixed kwargs passthrough and addition of dynamic global avg/max pool. +""" +import re +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from torch.jit.annotations import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import BatchNormAct2d, get_norm_act_layer, BlurPool2d, create_classifier +from ._builder import build_model_with_cfg +from ._manipulate import MATCH_PREV_GROUP +from ._registry import register_model, generate_default_cfgs, register_model_deprecations + +__all__ = ['DenseNet'] + + +class DenseLayer(nn.Module): + def __init__( + self, + num_input_features, + growth_rate, + bn_size, + norm_layer=BatchNormAct2d, + drop_rate=0., + grad_checkpointing=False, + ): + super(DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('conv1', nn.Conv2d( + num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('conv2', nn.Conv2d( + bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = float(drop_rate) + self.grad_checkpointing = grad_checkpointing + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + return cp.checkpoint(closure, *x) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> 
(torch.Tensor) + pass + + # torchscript does not yet support *args, so we overload method + # allowing it to take either a List[Tensor] or single Tensor + def forward(self, x): # noqa: F811 + if isinstance(x, torch.Tensor): + prev_features = [x] + else: + prev_features = x + + if self.grad_checkpointing and self.any_requires_grad(prev_features): + if torch.jit.is_scripting(): + raise Exception("Memory Efficient not supported in JIT") + bottleneck_output = self.call_checkpoint_bottleneck(prev_features) + else: + bottleneck_output = self.bottleneck_fn(prev_features) + + new_features = self.conv2(self.norm2(bottleneck_output)) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.ModuleDict): + _version = 2 + + def __init__( + self, + num_layers, + num_input_features, + bn_size, + growth_rate, + norm_layer=BatchNormAct2d, + drop_rate=0., + grad_checkpointing=False, + ): + super(DenseBlock, self).__init__() + for i in range(num_layers): + layer = DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_layer=norm_layer, + drop_rate=drop_rate, + grad_checkpointing=grad_checkpointing, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.items(): + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + def __init__( + self, + num_input_features, + num_output_features, + norm_layer=BatchNormAct2d, + aa_layer=None, + ): + super(DenseTransition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('conv', nn.Conv2d( + num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + if aa_layer is not None: + self.add_module('pool', aa_layer(num_output_features, stride=2)) + else: + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate before classifier layer + proj_drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" `_ + """ + + def __init__( + self, + growth_rate=32, + block_config=(6, 12, 24, 16), + num_classes=1000, + in_chans=3, + global_pool='avg', + bn_size=4, + stem_type='', + act_layer='relu', + norm_layer='batchnorm2d', + aa_layer=None, + drop_rate=0., + proj_drop_rate=0., + memory_efficient=False, + aa_stem_only=True, + ): + self.num_classes = num_classes + super(DenseNet, self).__init__() + norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) + + # Stem + deep_stem = 'deep' in stem_type # 3x3 deep stem + num_init_features = growth_rate * 2 + if aa_layer is None: + stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + stem_pool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=num_init_features, stride=2)]) + if deep_stem: + stem_chs_1 = stem_chs_2 = growth_rate + if 'tiered' in stem_type: + stem_chs_1 = 3 * (growth_rate // 4) + stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), + ('norm0', norm_layer(stem_chs_1)), + ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), + ('norm1', norm_layer(stem_chs_2)), + ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), + ('norm2', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + else: + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + self.feature_info = [ + dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] + current_stride = 4 + + # DenseBlocks + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + norm_layer=norm_layer, + drop_rate=proj_drop_rate, + grad_checkpointing=memory_efficient, + ) + module_name = f'denseblock{(i + 1)}' + self.features.add_module(module_name, block) + num_features = num_features + num_layers * growth_rate + transition_aa_layer = None if aa_stem_only else aa_layer + if i != len(block_config) - 1: + self.feature_info += [ + dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] + current_stride *= 2 + trans = DenseTransition( + num_input_features=num_features, + num_output_features=num_features // 2, + norm_layer=norm_layer, + aa_layer=transition_aa_layer, + ) + self.features.add_module(f'transition{i + 1}', trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] + self.num_features = self.head_hidden_size = num_features + + # Linear layer + global_pool, classifier = create_classifier( + self.num_features, + self.num_classes, + pool_type=global_pool, + ) + self.global_pool = global_pool + self.head_drop = nn.Dropout(drop_rate) + self.classifier = classifier + + # Official init from torch repo. 
+ for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^features\.conv[012]|features\.norm[012]|features\.pool[012]', + blocks=r'^features\.(?:denseblock|transition)(\d+)' if coarse else [ + (r'^features\.denseblock(\d+)\.denselayer(\d+)', None), + (r'^features\.transition(\d+)', MATCH_PREV_GROUP) # FIXME combine with previous denselayer + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for b in self.features.modules(): + if isinstance(b, DenseLayer): + b.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classifier + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.classifier(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _filter_torchvision_pretrained(state_dict): + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + return state_dict + + +def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): + kwargs['growth_rate'] = growth_rate + kwargs['block_config'] = block_config + return build_model_with_cfg( + DenseNet, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True), + pretrained_filter_fn=_filter_torchvision_pretrained, + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.conv0', 'classifier': 'classifier', **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'densenet121.ra_in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'densenetblur121d.ra_in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'densenet264d.untrained': _cfg(), + 'densenet121.tv_in1k': _cfg(hf_hub_id='timm/'), + 'densenet169.tv_in1k': _cfg(hf_hub_id='timm/'), + 'densenet201.tv_in1k': _cfg(hf_hub_id='timm/'), + 'densenet161.tv_in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def densenet121(pretrained=False, **kwargs) -> DenseNet: + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16)) + model = _create_densenet('densenet121', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def densenetblur121d(pretrained=False, **kwargs) -> DenseNet: + r"""Densenet-121 w/ blur-pooling & 3-layer 3x3 stem + `"Densely Connected Convolutional Networks" ` + """ + model_args = 
dict(growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', aa_layer=BlurPool2d) + model = _create_densenet('densenetblur121d', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def densenet169(pretrained=False, **kwargs) -> DenseNet: + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" ` + """ + model_args = dict(growth_rate=32, block_config=(6, 12, 32, 32)) + model = _create_densenet('densenet169', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def densenet201(pretrained=False, **kwargs) -> DenseNet: + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" ` + """ + model_args = dict(growth_rate=32, block_config=(6, 12, 48, 32)) + model = _create_densenet('densenet201', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def densenet161(pretrained=False, **kwargs) -> DenseNet: + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" ` + """ + model_args = dict(growth_rate=48, block_config=(6, 12, 36, 24)) + model = _create_densenet('densenet161', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def densenet264d(pretrained=False, **kwargs) -> DenseNet: + r"""Densenet-264 model from + `"Densely Connected Convolutional Networks" ` + """ + model_args = dict(growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep') + model = _create_densenet('densenet264d', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +register_model_deprecations(__name__, { + 'tv_densenet121': 'densenet121.tv_in1k', +}) diff --git a/pytorch-image-models/timm/models/dla.py b/pytorch-image-models/timm/models/dla.py new file mode 100644 index 0000000000000000000000000000000000000000..666acd9d9ca8186f5e7b19a3e71beb551e6be8e1 --- /dev/null +++ b/pytorch-image-models/timm/models/dla.py @@ -0,0 +1,515 @@ +""" Deep Layer Aggregation and DLA w/ Res2Net +DLA original adapted from Official Pytorch impl at: https://github.com/ucbdrive/dla +DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 + +Res2Net additions from: https://github.com/gasvn/Res2Net/ +Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import create_classifier +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['DLA'] + + +class DlaBasic(nn.Module): + """DLA Basic""" + + def __init__(self, inplanes, planes, stride=1, dilation=1, **_): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, + stride=stride, padding=dilation, bias=False, dilation=dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, + stride=1, padding=dilation, bias=False, dilation=dilation) + self.bn2 = nn.BatchNorm2d(planes) + self.stride = stride + + def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += shortcut + out = self.relu(out) + + return out + + 
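+# NOTE: the DLA block variants below all share the forward(x, shortcut, children)
+# signature. DlaTree feeds the downsampled + projected input in as the external
+# `shortcut`, so the residual add works across stride/channel changes; `children`
+# is ignored by the blocks themselves and only consumed by the DlaRoot aggregation.
+# Illustrative shape check (a minimal sketch, not part of the upstream file):
+#   blk = DlaBasic(64, 64)                     # stride 1, matching channels
+#   blk(torch.randn(2, 64, 56, 56)).shape      # -> torch.Size([2, 64, 56, 56])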
+class DlaBottleneck(nn.Module): + """DLA/DLA-X Bottleneck""" + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes) + self.conv2 = nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, + stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality) + self.bn2 = nn.BatchNorm2d(mid_planes) + self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottle2neck(nn.Module): + """ Res2Net/Res2NeXT DLA Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py + """ + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): + super(DlaBottle2neck, self).__init__() + self.is_first = stride > 1 + self.scale = scale + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + self.width = mid_planes + + self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes * scale) + + num_scale_convs = max(1, scale - 1) + convs = [] + bns = [] + for _ in range(num_scale_convs): + convs.append(nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, + stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False)) + bns.append(nn.BatchNorm2d(mid_planes)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if self.is_first else None + + self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0] # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: # self.is_first == True, None check for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaRoot(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, shortcut): + super(DlaRoot, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) + self.bn = 
nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.shortcut = shortcut + + def forward(self, x_children: List[torch.Tensor]): + x = self.conv(torch.cat(x_children, 1)) + x = self.bn(x) + if self.shortcut: + x += x_children[0] + x = self.relu(x) + + return x + + +class DlaTree(nn.Module): + def __init__( + self, + levels, + block, + in_channels, + out_channels, + stride=1, + dilation=1, + cardinality=1, + base_width=64, + level_root=False, + root_dim=0, + root_kernel_size=1, + root_shortcut=False, + ): + super(DlaTree, self).__init__() + if root_dim == 0: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() + self.project = nn.Identity() + cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) + if levels == 1: + self.tree1 = block(in_channels, out_channels, stride, **cargs) + self.tree2 = block(out_channels, out_channels, 1, **cargs) + if in_channels != out_channels: + # NOTE the official impl/weights have project layers in levels > 1 case that are never + # used, I've moved the project layer here to avoid wasted params but old checkpoints will + # need strict=False while loading. + self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), + nn.BatchNorm2d(out_channels)) + self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) + else: + cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) + self.tree1 = DlaTree( + levels - 1, + block, + in_channels, + out_channels, + stride, + root_dim=0, + **cargs, + ) + self.tree2 = DlaTree( + levels - 1, + block, + out_channels, + out_channels, + root_dim=root_dim + out_channels, + **cargs, + ) + self.root = None + self.level_root = level_root + self.root_dim = root_dim + self.levels = levels + + def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): + if children is None: + children = [] + bottom = self.downsample(x) + shortcut = self.project(bottom) + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, shortcut) + if self.root is not None: # levels == 1 + x2 = self.tree2(x1) + x = self.root([x2, x1] + children) + else: + children.append(x1) + x = self.tree2(x1, None, children) + return x + + +class DLA(nn.Module): + def __init__( + self, + levels, + channels, + output_stride=32, + num_classes=1000, + in_chans=3, + global_pool='avg', + cardinality=1, + base_width=64, + block=DlaBottle2neck, + shortcut_root=False, + drop_rate=0.0, + ): + super(DLA, self).__init__() + self.channels = channels + self.num_classes = num_classes + self.cardinality = cardinality + self.base_width = base_width + assert output_stride == 32 # FIXME support dilation + + self.base_layer = nn.Sequential( + nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), + nn.BatchNorm2d(channels[0]), + nn.ReLU(inplace=True), + ) + self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) + self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) + cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) + self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) + self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) + self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, 
level_root=True, **cargs) + self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) + self.feature_info = [ + dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level + dict(num_chs=channels[1], reduction=2, module='level1'), + dict(num_chs=channels[2], reduction=4, module='level2'), + dict(num_chs=channels[3], reduction=8, module='level3'), + dict(num_chs=channels[4], reduction=16, module='level4'), + dict(num_chs=channels[5], reduction=32, module='level5'), + ] + + self.num_features = self.head_hidden_size = channels[-1] + self.global_pool, self.head_drop, self.fc = create_classifier( + self.num_features, + self.num_classes, + pool_type=global_pool, + use_conv=True, + drop_rate=drop_rate, + ) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): + modules = [] + for i in range(convs): + modules.extend([ + nn.Conv2d( + inplanes, planes, kernel_size=3, + stride=stride if i == 0 else 1, + padding=dilation, bias=False, dilation=dilation), + nn.BatchNorm2d(planes), + nn.ReLU(inplace=True)]) + inplanes = planes + return nn.Sequential(*modules) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^base_layer', + blocks=r'^level(\d+)' if coarse else [ + # an unusual arch, this achieves somewhat more granularity without getting super messy + (r'^level(\d+)\.tree(\d+)', None), + (r'^level(\d+)\.root', (2,)), + (r'^level(\d+)', (1,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.fc + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + x = self.base_layer(x) + x = self.level0(x) + x = self.level1(x) + x = self.level2(x) + x = self.level3(x) + x = self.level4(x) + x = self.level5(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + if pre_logits: + return self.flatten(x) + x = self.fc(x) + return self.flatten(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_dla(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DLA, + variant, + pretrained, + pretrained_strict=False, + feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'base_layer.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'dla34.in1k': _cfg(hf_hub_id='timm/'), + 'dla46_c.in1k': _cfg(hf_hub_id='timm/'), + 'dla46x_c.in1k': _cfg(hf_hub_id='timm/'), + 'dla60x_c.in1k': 
_cfg(hf_hub_id='timm/'), + 'dla60.in1k': _cfg(hf_hub_id='timm/'), + 'dla60x.in1k': _cfg(hf_hub_id='timm/'), + 'dla102.in1k': _cfg(hf_hub_id='timm/'), + 'dla102x.in1k': _cfg(hf_hub_id='timm/'), + 'dla102x2.in1k': _cfg(hf_hub_id='timm/'), + 'dla169.in1k': _cfg(hf_hub_id='timm/'), + 'dla60_res2net.in1k': _cfg(hf_hub_id='timm/'), + 'dla60_res2next.in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def dla60_res2net(pretrained=False, **kwargs) -> DLA: + model_args = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=1, base_width=28) + return _create_dla('dla60_res2net', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla60_res2next(pretrained=False,**kwargs): + model_args = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=8, base_width=4) + return _create_dla('dla60_res2next', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla34(pretrained=False, **kwargs) -> DLA: # DLA-34 + model_args = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], block=DlaBasic) + return _create_dla('dla34', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla46_c(pretrained=False, **kwargs) -> DLA: # DLA-46-C + model_args = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck) + return _create_dla('dla46_c', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla46x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-46-C + model_args = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4) + return _create_dla('dla46x_c', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla60x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-60-C + model_args = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4) + return _create_dla('dla60x_c', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla60(pretrained=False, **kwargs) -> DLA: # DLA-60 + model_args = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck) + return _create_dla('dla60', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla60x(pretrained=False, **kwargs) -> DLA: # DLA-X-60 + model_args = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4) + return _create_dla('dla60x', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla102(pretrained=False, **kwargs) -> DLA: # DLA-102 + model_args = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True) + return _create_dla('dla102', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla102x(pretrained=False, **kwargs) -> DLA: # DLA-X-102 + model_args = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True) + return _create_dla('dla102x', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dla102x2(pretrained=False, **kwargs) -> DLA: # DLA-X-102 64 + model_args = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True) + return _create_dla('dla102x2', pretrained, 
**dict(model_args, **kwargs)) + + +@register_model +def dla169(pretrained=False, **kwargs) -> DLA: # DLA-169 + model_args = dict( + levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True) + return _create_dla('dla169', pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/dpn.py b/pytorch-image-models/timm/models/dpn.py new file mode 100644 index 0000000000000000000000000000000000000000..c03e5fe1a14e37021b8da200c442161e380a3d2a --- /dev/null +++ b/pytorch-image-models/timm/models/dpn.py @@ -0,0 +1,371 @@ +""" PyTorch implementation of DualPathNetworks +Based on original MXNet implementation https://github.com/cypw/DPNs with +many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. + +This implementation is compatible with the pretrained weights from cypw's MXNet implementation. + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict +from functools import partial +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier, get_norm_act_layer +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['DPN'] + + +class CatBnAct(nn.Module): + def __init__(self, in_chs, norm_layer=BatchNormAct2d): + super(CatBnAct, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + def forward(self, x): + if isinstance(x, tuple): + x = torch.cat(x, dim=1) + return self.bn(x) + + +class BnActConv2d(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): + super(BnActConv2d, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) + + def forward(self, x): + return self.conv(self.bn(x)) + + +class DualPathBlock(nn.Module): + def __init__( + self, + in_chs, + num_1x1_a, + num_3x3_b, + num_1x1_c, + inc, + groups, + block_type='normal', + b=False, + ): + super(DualPathBlock, self).__init__() + self.num_1x1_c = num_1x1_c + self.inc = inc + self.b = b + if block_type == 'proj': + self.key_stride = 1 + self.has_proj = True + elif block_type == 'down': + self.key_stride = 2 + self.has_proj = True + else: + assert block_type == 'normal' + self.key_stride = 1 + self.has_proj = False + + self.c1x1_w_s1 = None + self.c1x1_w_s2 = None + if self.has_proj: + # Using different member names here to allow easier parameter key matching for conversion + if self.key_stride == 2: + self.c1x1_w_s2 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) + else: + self.c1x1_w_s1 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) + + self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) + self.c3x3_b = BnActConv2d( + in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) + if b: + self.c1x1_c = CatBnAct(in_chs=num_3x3_b) + self.c1x1_c1 = create_conv2d(num_3x3_b, 
num_1x1_c, kernel_size=1) + self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) + else: + self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) + self.c1x1_c1 = None + self.c1x1_c2 = None + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + pass + + def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: + if isinstance(x, tuple): + x_in = torch.cat(x, dim=1) + else: + x_in = x + if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: + # self.has_proj == False, torchscript requires condition on module == None + x_s1 = x[0] + x_s2 = x[1] + else: + # self.has_proj == True + if self.c1x1_w_s1 is not None: + # self.key_stride = 1 + x_s = self.c1x1_w_s1(x_in) + else: + # self.key_stride = 2 + x_s = self.c1x1_w_s2(x_in) + x_s1 = x_s[:, :self.num_1x1_c, :, :] + x_s2 = x_s[:, self.num_1x1_c:, :, :] + x_in = self.c1x1_a(x_in) + x_in = self.c3x3_b(x_in) + x_in = self.c1x1_c(x_in) + if self.c1x1_c1 is not None: + # self.b == True, using None check for torchscript compat + out1 = self.c1x1_c1(x_in) + out2 = self.c1x1_c2(x_in) + else: + out1 = x_in[:, :self.num_1x1_c, :, :] + out2 = x_in[:, self.num_1x1_c:, :, :] + resid = x_s1 + out1 + dense = torch.cat([x_s2, out2], dim=1) + return resid, dense + + +class DPN(nn.Module): + def __init__( + self, + k_sec=(3, 4, 20, 3), + inc_sec=(16, 32, 24, 128), + k_r=96, + groups=32, + num_classes=1000, + in_chans=3, + output_stride=32, + global_pool='avg', + small=False, + num_init_features=64, + b=False, + drop_rate=0., + norm_layer='batchnorm2d', + act_layer='relu', + fc_act_layer='elu', + ): + super(DPN, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.b = b + assert output_stride == 32 # FIXME look into dilation support + + norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=act_layer), eps=.001) + fc_norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=fc_act_layer), eps=.001, inplace=False) + bw_factor = 1 if small else 4 + blocks = OrderedDict() + + # conv1 + blocks['conv1_1'] = ConvNormAct( + in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) + blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] + + # conv2 + bw = 64 * bw_factor + inc = inc_sec[0] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[0] + 1): + blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] + + # conv3 + bw = 128 * bw_factor + inc = inc_sec[1] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[1] + 1): + blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] + + # conv4 + bw = 256 * bw_factor + inc = inc_sec[2] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv4_1'] = 
DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[2] + 1): + blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] + + # conv5 + bw = 512 * bw_factor + inc = inc_sec[3] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[3] + 1): + blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] + + blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) + + self.num_features = self.head_hidden_size = in_chs + self.features = nn.Sequential(blocks) + + # Using 1x1 conv for the FC layer to allow the extra pooling scheme + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^features\.conv1', + blocks=[ + (r'^features\.conv(\d+)' if coarse else r'^features\.conv(\d+)_(\d+)', None), + (r'^features\.conv5_bn_ac', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classifier + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + return self.features(x) + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + if pre_logits: + return self.flatten(x) + x = self.classifier(x) + return self.flatten(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_dpn(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DPN, + variant, + pretrained, + feature_cfg=dict(feature_concat=True, flatten_sequential=True), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, + 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'dpn48b.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'dpn68.mx_in1k': _cfg(hf_hub_id='timm/'), + 'dpn68b.ra_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'dpn68b.mx_in1k': _cfg(hf_hub_id='timm/'), + 'dpn92.mx_in1k': _cfg(hf_hub_id='timm/'), + 'dpn98.mx_in1k': _cfg(hf_hub_id='timm/'), + 'dpn131.mx_in1k': _cfg(hf_hub_id='timm/'), + 'dpn107.mx_in1k': _cfg(hf_hub_id='timm/') +}) + + +@register_model +def dpn48b(pretrained=False, **kwargs) -> DPN: + 
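# DPN-48b: 'b' variant w/ small stem and SiLU act; only an untrained default cfg is defined above +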
model_args = dict( + small=True, num_init_features=10, k_r=128, groups=32, + b=True, k_sec=(3, 4, 6, 3), inc_sec=(16, 32, 32, 64), act_layer='silu') + return _create_dpn('dpn48b', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dpn68(pretrained=False, **kwargs) -> DPN: + model_args = dict( + small=True, num_init_features=10, k_r=128, groups=32, + k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) + return _create_dpn('dpn68', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dpn68b(pretrained=False, **kwargs) -> DPN: + model_args = dict( + small=True, num_init_features=10, k_r=128, groups=32, + b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) + return _create_dpn('dpn68b', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dpn92(pretrained=False, **kwargs) -> DPN: + model_args = dict( + num_init_features=64, k_r=96, groups=32, + k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128)) + return _create_dpn('dpn92', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dpn98(pretrained=False, **kwargs) -> DPN: + model_args = dict( + num_init_features=96, k_r=160, groups=40, + k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128)) + return _create_dpn('dpn98', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dpn131(pretrained=False, **kwargs) -> DPN: + model_args = dict( + num_init_features=128, k_r=160, groups=40, + k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128)) + return _create_dpn('dpn131', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def dpn107(pretrained=False, **kwargs) -> DPN: + model_args = dict( + num_init_features=128, k_r=200, groups=50, + k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128)) + return _create_dpn('dpn107', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/edgenext.py b/pytorch-image-models/timm/models/edgenext.py new file mode 100644 index 0000000000000000000000000000000000000000..d768b1dc331789f52310fc97c6d169b82fb56b27 --- /dev/null +++ b/pytorch-image-models/timm/models/edgenext.py @@ -0,0 +1,576 @@ +""" EdgeNeXt + +Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications` + - https://arxiv.org/abs/2206.10589 + +Original code and weights from https://github.com/mmaaz60/EdgeNeXt + +Modifications and additions for timm by / Copyright 2022, Ross Wightman +""" +import math +from functools import partial +from typing import Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, create_conv2d, \ + NormMlpClassifierHead, ClassifierHead +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_module +from ._manipulate import named_apply, checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['EdgeNeXt'] # model_registry will add each entrypoint fn to this + + +@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method +class PositionalEncodingFourier(nn.Module): + def __init__(self, hidden_dim=32, dim=768, temperature=10000): + super().__init__() + self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) + self.scale = 2 * math.pi + self.temperature = temperature + self.hidden_dim = hidden_dim + self.dim = dim + + def forward(self, shape: 
Tuple[int, int, int]): + device = self.token_projection.weight.device + dtype = self.token_projection.weight.dtype + inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool) + y_embed = inv_mask.cumsum(1, dtype=torch.float32) + x_embed = inv_mask.cumsum(2, dtype=torch.float32) + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.hidden_dim, dtype=torch.int64, device=device).to(torch.float32) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), + pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), + pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.token_projection(pos.to(dtype)) + + return pos + + +class ConvBlock(nn.Module): + def __init__( + self, + dim, + dim_out=None, + kernel_size=7, + stride=1, + conv_bias=True, + expand_ratio=4, + ls_init_value=1e-6, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, drop_path=0., + ): + super().__init__() + dim_out = dim_out or dim + self.shortcut_after_dw = stride > 1 or dim != dim_out + + self.conv_dw = create_conv2d( + dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias) + self.norm = norm_layer(dim_out) + self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer) + self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.conv_dw(x) + if self.shortcut_after_dw: + shortcut = x + + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm(x) + x = self.mlp(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = shortcut + self.drop_path(x) + return x + + +class CrossCovarianceAttn(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0. + ): + super().__init__() + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1) + q, k, v = qkv.unbind(0) + + # NOTE, this is NOT spatial attn, q, k, v are B, num_heads, C, L --> C x C attn map + attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v) + + x = x.permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class SplitTransposeBlock(nn.Module): + def __init__( + self, + dim, + num_scales=1, + num_heads=8, + expand_ratio=4, + use_pos_emb=True, + conv_bias=True, + qkv_bias=True, + ls_init_value=1e-6, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_path=0., + attn_drop=0., + proj_drop=0. 
+ ): + super().__init__() + width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales))) + self.width = width + self.num_scales = max(1, num_scales - 1) + + convs = [] + for i in range(self.num_scales): + convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias)) + self.convs = nn.ModuleList(convs) + + self.pos_embd = None + if use_pos_emb: + self.pos_embd = PositionalEncodingFourier(dim=dim) + self.norm_xca = norm_layer(dim) + self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None + self.xca = CrossCovarianceAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) + + self.norm = norm_layer(dim, eps=1e-6) + self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer) + self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + + # scales code re-written for torchscript as per my res2net fixes -rw + # NOTE torch.split(x, self.width, 1) causing issues with ONNX export + spx = x.chunk(len(self.convs) + 1, dim=1) + spo = [] + sp = spx[0] + for i, conv in enumerate(self.convs): + if i > 0: + sp = sp + spx[i] + sp = conv(sp) + spo.append(sp) + spo.append(spx[-1]) + x = torch.cat(spo, 1) + + # XCA + B, C, H, W = x.shape + x = x.reshape(B, C, H * W).permute(0, 2, 1) + if self.pos_embd is not None: + pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x))) + x = x.reshape(B, H, W, C) + + # Inverted Bottleneck + x = self.norm(x) + x = self.mlp(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = shortcut + self.drop_path(x) + return x + + +class EdgeNeXtStage(nn.Module): + def __init__( + self, + in_chs, + out_chs, + stride=2, + depth=2, + num_global_blocks=1, + num_heads=4, + scales=2, + kernel_size=7, + expand_ratio=4, + use_pos_emb=False, + downsample_block=False, + conv_bias=True, + ls_init_value=1.0, + drop_path_rates=None, + norm_layer=LayerNorm2d, + norm_layer_cl=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU + ): + super().__init__() + self.grad_checkpointing = False + + if downsample_block or stride == 1: + self.downsample = nn.Identity() + else: + self.downsample = nn.Sequential( + norm_layer(in_chs), + nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias) + ) + in_chs = out_chs + + stage_blocks = [] + for i in range(depth): + if i < depth - num_global_blocks: + stage_blocks.append( + ConvBlock( + dim=in_chs, + dim_out=out_chs, + stride=stride if downsample_block and i == 0 else 1, + conv_bias=conv_bias, + kernel_size=kernel_size, + expand_ratio=expand_ratio, + ls_init_value=ls_init_value, + drop_path=drop_path_rates[i], + norm_layer=norm_layer_cl, + act_layer=act_layer, + ) + ) + else: + stage_blocks.append( + SplitTransposeBlock( + dim=in_chs, + num_scales=scales, + num_heads=num_heads, + expand_ratio=expand_ratio, + use_pos_emb=use_pos_emb, + conv_bias=conv_bias, + ls_init_value=ls_init_value, + drop_path=drop_path_rates[i], + norm_layer=norm_layer_cl, + act_layer=act_layer, + ) + ) + in_chs = out_chs + self.blocks = nn.Sequential(*stage_blocks) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x 
= self.blocks(x) + return x + + +class EdgeNeXt(nn.Module): + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + dims=(24, 48, 88, 168), + depths=(3, 3, 9, 3), + global_block_counts=(0, 1, 1, 1), + kernel_sizes=(3, 5, 7, 9), + heads=(8, 8, 8, 8), + d2_scales=(2, 2, 3, 4), + use_pos_emb=(False, True, False, False), + ls_init_value=1e-6, + head_init_scale=1., + expand_ratio=4, + downsample_block=False, + conv_bias=True, + stem_type='patch', + head_norm_first=False, + act_layer=nn.GELU, + drop_path_rate=0., + drop_rate=0., + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.drop_rate = drop_rate + norm_layer = partial(LayerNorm2d, eps=1e-6) + norm_layer_cl = partial(nn.LayerNorm, eps=1e-6) + self.feature_info = [] + + assert stem_type in ('patch', 'overlap') + if stem_type == 'patch': + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias), + norm_layer(dims[0]), + ) + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias), + norm_layer(dims[0]), + ) + + curr_stride = 4 + stages = [] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + in_chs = dims[0] + for i in range(4): + stride = 2 if curr_stride == 2 or i > 0 else 1 + # FIXME support dilation / output_stride + curr_stride *= stride + stages.append(EdgeNeXtStage( + in_chs=in_chs, + out_chs=dims[i], + stride=stride, + depth=depths[i], + num_global_blocks=global_block_counts[i], + num_heads=heads[i], + drop_path_rates=dp_rates[i], + scales=d2_scales[i], + expand_ratio=expand_ratio, + kernel_size=kernel_sizes[i], + use_pos_emb=use_pos_emb[i], + ls_init_value=ls_init_value, + downsample_block=downsample_block, + conv_bias=conv_bias, + norm_layer=norm_layer, + norm_layer_cl=norm_layer_cl, + act_layer=act_layer, + )) + # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 + in_chs = dims[i] + self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.num_features = self.head_hidden_size = dims[-1] + if head_norm_first: + self.norm_pre = norm_layer(self.num_features) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + ) + else: + self.norm_pre = nn.Identity() + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + norm_layer=norm_layer, + ) + + named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.downsample', (0,)), # blocks + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^norm_pre', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm_pre(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else 
self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name=None, head_init_scale=1.0): + if isinstance(module, nn.Conv2d): + trunc_normal_tf_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Linear): + trunc_normal_tf_(module.weight, std=.02) + nn.init.zeros_(module.bias) + if name and 'head.' in name: + module.weight.data.mul_(head_init_scale) + module.bias.data.mul_(head_init_scale) + + +def checkpoint_filter_fn(state_dict, model): + """ Remap FB checkpoints -> timm """ + if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: + return state_dict # non-FB checkpoint + + # models were released as train checkpoints... :/ + if 'model_ema' in state_dict: + state_dict = state_dict['model_ema'] + elif 'model' in state_dict: + state_dict = state_dict['model'] + elif 'state_dict' in state_dict: + state_dict = state_dict['state_dict'] + + out_dict = {} + import re + for k, v in state_dict.items(): + k = k.replace('downsample_layers.0.', 'stem.') + k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) + k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) + k = k.replace('dwconv', 'conv_dw') + k = k.replace('pwconv', 'mlp.fc') + k = k.replace('head.', 'head.fc.') + if k.startswith('norm.'): + k = k.replace('norm', 'head.norm') + if v.ndim == 2 and 'head' not in k: + model_shape = model.state_dict()[k].shape + v = v.reshape(model_shape) + out_dict[k] = v + return out_dict + + +def _create_edgenext(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + EdgeNeXt, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'edgenext_xx_small.in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'edgenext_x_small.in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'edgenext_small.usi_in1k': _cfg( # USI weights + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, + ), + 'edgenext_base.usi_in1k': _cfg( # USI weights + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, + ), + 'edgenext_base.in21k_ft_in1k': _cfg( # USI weights + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, + ), + 'edgenext_small_rw.sw_in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 320, 320), test_crop_pct=1.0, + ), +}) + + +@register_model +def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt: + # 1.33M & 260.58M @ 256 resolution + # 71.23% Top-1 accuracy + # No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=51.66 versus 47.67 for MobileViT_XXS + # For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS + model_args = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4)) + return _create_edgenext('edgenext_xx_small', pretrained=pretrained, 
**dict(model_args, **kwargs)) + + +@register_model +def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt: + # 2.34M & 538.0M @ 256 resolution + # 75.00% Top-1 accuracy + # No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=31.61 versus 28.49 for MobileViT_XS + # For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS + model_args = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4)) + return _create_edgenext('edgenext_x_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt: + # 5.59M & 1260.59M @ 256 resolution + # 79.43% Top-1 accuracy + # AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=20.47 versus 18.86 for MobileViT_S + # For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 1785.92 for MobileViT_S + model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304)) + return _create_edgenext('edgenext_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt: + # 18.51M & 3840.93M @ 256 resolution + # 82.5% (normal) 83.7% (USI) Top-1 accuracy + # AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=xx.xx versus xx.xx for MobileViT_S + # For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx + model_args = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584]) + return _create_edgenext('edgenext_base', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt: + model_args = dict( + depths=(3, 3, 9, 3), dims=(48, 96, 192, 384), + downsample_block=True, conv_bias=False, stem_type='overlap') + return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + diff --git a/pytorch-image-models/timm/models/efficientformer.py b/pytorch-image-models/timm/models/efficientformer.py new file mode 100644 index 0000000000000000000000000000000000000000..669062d365cbd0e562d261e776d0c60c462c9645 --- /dev/null +++ b/pytorch-image-models/timm/models/efficientformer.py @@ -0,0 +1,649 @@ +""" EfficientFormer + +@article{li2022efficientformer, + title={EfficientFormer: Vision Transformers at MobileNet Speed}, + author={Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov, + Sergey and Wang, Yanzhi and Ren, Jian}, + journal={arXiv preprint arXiv:2206.01191}, + year={2022} +} + +Based on Apache 2.0 licensed code at https://github.com/snap-research/EfficientFormer, Copyright (c) 2022 Snap Inc. 
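+ +Usage sketch (assuming the published hub weights resolve): model = timm.create_model('efficientformer_l1', pretrained=True)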
+ +Modifications and timm support by / Copyright 2022, Ross Wightman +""" +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, trunc_normal_, to_2tuple, Mlp, ndgrid +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['EfficientFormer'] # model_registry will add each entrypoint fn to this + + +EfficientFormer_width = { + 'l1': (48, 96, 224, 448), + 'l3': (64, 128, 320, 512), + 'l7': (96, 192, 384, 768), +} + +EfficientFormer_depth = { + 'l1': (3, 2, 6, 4), + 'l3': (4, 4, 12, 6), + 'l7': (6, 6, 18, 8), +} + + +class Attention(torch.nn.Module): + attention_bias_cache: Dict[str, torch.Tensor] + + def __init__( + self, + dim=384, + key_dim=32, + num_heads=8, + attn_ratio=4, + resolution=7 + ): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.key_attn_dim = key_dim * num_heads + self.val_dim = int(attn_ratio * key_dim) + self.val_attn_dim = self.val_dim * num_heads + self.attn_ratio = attn_ratio + + self.qkv = nn.Linear(dim, self.key_attn_dim * 2 + self.val_attn_dim) + self.proj = nn.Linear(self.val_attn_dim, dim) + + resolution = to_2tuple(resolution) + pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) + rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1] + self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) + self.register_buffer('attention_bias_idxs', rel_pos) + self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if torch.jit.is_tracing() or self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): # x (B,N,C) + B, N, C = x.shape + qkv = self.qkv(x) + qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + q, k, v = qkv.split([self.key_dim, self.key_dim, self.val_dim], dim=3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn + self.get_attention_biases(x.device) + + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) + x = self.proj(x) + return x + + +class Stem4(nn.Sequential): + def __init__(self, in_chs, out_chs, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super().__init__() + self.stride = 4 + + self.add_module('conv1', nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1)) + self.add_module('norm1', norm_layer(out_chs // 2)) + self.add_module('act1', act_layer()) + self.add_module('conv2', nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1)) + self.add_module('norm2', norm_layer(out_chs)) + self.add_module('act2', act_layer()) + + +class Downsample(nn.Module): + """ + Downsampling via strided conv w/ norm + Input: tensor in shape [B, C, H, W] + 
Output: tensor in shape [B, C, H/stride, W/stride] + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=None, norm_layer=nn.BatchNorm2d): + super().__init__() + if padding is None: + padding = kernel_size // 2 + self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) + self.norm = norm_layer(out_chs) + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class Flat(nn.Module): + + def __init__(self, ): + super().__init__() + + def forward(self, x): + x = x.flatten(2).transpose(1, 2) + return x + + +class Pooling(nn.Module): + """ + Implementation of pooling for PoolFormer + --pool_size: pooling size + """ + + def __init__(self, pool_size=3): + super().__init__() + self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) + + def forward(self, x): + return self.pool(x) - x + + +class ConvMlpWithNorm(nn.Module): + """ + Implementation of MLP with 1*1 convolutions. + Input: tensor with shape [B, C, H, W] + """ + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + drop=0. + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, 1) + self.norm1 = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_features, out_features, 1) + self.norm2 = norm_layer(out_features) if norm_layer is not None else nn.Identity() + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.norm1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.norm2(x) + x = self.drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class MetaBlock1d(nn.Module): + + def __init__( + self, + dim, + mlp_ratio=4., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + proj_drop=0., + drop_path=0., + layer_scale_init_value=1e-5 + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.token_mixer = Attention(dim) + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.ls1 = LayerScale(dim, layer_scale_init_value) + self.ls2 = LayerScale(dim, layer_scale_init_value) + + def forward(self, x): + x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x)))) + x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class LayerScale2d(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma.view(1, -1, 1, 1) + return x.mul_(gamma) if self.inplace else x * gamma + + +class MetaBlock2d(nn.Module): + + def __init__( + self, + dim, + pool_size=3, + mlp_ratio=4., + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + proj_drop=0., + drop_path=0., + layer_scale_init_value=1e-5 + ): + super().__init__() + self.token_mixer = Pooling(pool_size=pool_size) + self.ls1 = LayerScale2d(dim, layer_scale_init_value) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.mlp = ConvMlpWithNorm( + dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + norm_layer=norm_layer, + drop=proj_drop, + ) + self.ls2 = LayerScale2d(dim, layer_scale_init_value) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self.token_mixer(x))) + x = x + self.drop_path2(self.ls2(self.mlp(x))) + return x + + +class EfficientFormerStage(nn.Module): + + def __init__( + self, + dim, + dim_out, + depth, + downsample=True, + num_vit=1, + pool_size=3, + mlp_ratio=4., + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + norm_layer_cl=nn.LayerNorm, + proj_drop=.0, + drop_path=0., + layer_scale_init_value=1e-5, +): + super().__init__() + self.grad_checkpointing = False + + if downsample: + self.downsample = Downsample(in_chs=dim, out_chs=dim_out, norm_layer=norm_layer) + dim = dim_out + else: + assert dim == dim_out + self.downsample = nn.Identity() + + blocks = [] + if num_vit and num_vit >= depth: + blocks.append(Flat()) + + for block_idx in range(depth): + remain_idx = depth - block_idx - 1 + if num_vit and num_vit > remain_idx: + blocks.append( + MetaBlock1d( + dim, + mlp_ratio=mlp_ratio, + act_layer=act_layer, + norm_layer=norm_layer_cl, + proj_drop=proj_drop, + drop_path=drop_path[block_idx], + layer_scale_init_value=layer_scale_init_value, + )) + else: + blocks.append( + MetaBlock2d( + dim, + pool_size=pool_size, + mlp_ratio=mlp_ratio, + act_layer=act_layer, + norm_layer=norm_layer, + proj_drop=proj_drop, + drop_path=drop_path[block_idx], + layer_scale_init_value=layer_scale_init_value, + )) + if num_vit and num_vit == remain_idx: + blocks.append(Flat()) + + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class EfficientFormer(nn.Module): + + def __init__( + self, + depths, + embed_dims=None, + in_chans=3, + num_classes=1000, + global_pool='avg', + downsamples=None, + num_vit=0, + mlp_ratios=4, + pool_size=3, + layer_scale_init_value=1e-5, + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + norm_layer_cl=nn.LayerNorm, + drop_rate=0., + proj_drop_rate=0., + drop_path_rate=0., + **kwargs + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + + self.stem = Stem4(in_chans, embed_dims[0], norm_layer=norm_layer) + prev_dim = embed_dims[0] + + # 
stochastic depth decay rule + self.num_stages = len(depths) + last_stage = self.num_stages - 1 + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + downsamples = downsamples or (False,) + (True,) * (self.num_stages - 1) + stages = [] + self.feature_info = [] + for i in range(self.num_stages): + stage = EfficientFormerStage( + prev_dim, + embed_dims[i], + depths[i], + downsample=downsamples[i], + num_vit=num_vit if i == last_stage else 0, + pool_size=pool_size, + mlp_ratio=mlp_ratios, + act_layer=act_layer, + norm_layer_cl=norm_layer_cl, + norm_layer=norm_layer, + proj_drop=proj_drop_rate, + drop_path=dpr[i], + layer_scale_init_value=layer_scale_init_value, + ) + prev_dim = embed_dims[i] + stages.append(stage) + self.feature_info += [dict(num_chs=embed_dims[i], reduction=2**(i+2), module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + + # Classifier head + self.num_features = self.head_hidden_size = embed_dims[-1] + self.norm = norm_layer_cl(self.num_features) + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + # assuming model is always distilled (valid for current checkpoints, will split def if that changes) + self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() + self.distilled_training = False # must set this True to train w/ distillation token + + self.apply(self._init_weights) + + # init for classification + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay(self): + return {k for k, _ in self.named_parameters() if 'attention_biases' in k} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', # stem and embed + blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head, self.head_dist + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
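+ # feature_take_indices() resolves which stage outputs to keep and the deepest stage that must be run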
+ intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stages), indices) + + # forward pass + x = self.stem(x) + B, C, H, W = x.shape + + last_idx = self.num_stages - 1 + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.stages + else: + stages = self.stages[:max_index + 1] + feat_idx = 0 + for feat_idx, stage in enumerate(stages): + x = stage(x) + if feat_idx < last_idx: + B, C, H, W = x.shape + if feat_idx in take_indices: + if feat_idx == last_idx: + x_inter = self.norm(x) if norm else x + intermediates.append(x_inter.reshape(B, H // 2, W // 2, -1).permute(0, 3, 1, 2)) + else: + intermediates.append(x) + + if intermediates_only: + return intermediates + + if feat_idx == last_idx: + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + take_indices, max_index = feature_take_indices(len(self.stages), indices) + self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=1) + x = self.head_drop(x) + if pre_logits: + return x + x, x_dist = self.head(x), self.head_dist(x) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train/finetune, inference average the classifier predictions + return (x + x_dist) / 2 + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ Remap original checkpoints -> timm """ + if 'stem.0.weight' in state_dict: + return state_dict # non-original checkpoint, no remapping needed + + out_dict = {} + import re + stage_idx = 0 + for k, v in state_dict.items(): + if k.startswith('patch_embed'): + k = k.replace('patch_embed.0', 'stem.conv1') + k = k.replace('patch_embed.1', 'stem.norm1') + k = k.replace('patch_embed.3', 'stem.conv2') + k = k.replace('patch_embed.4', 'stem.norm2') + + if re.match(r'network\.(\d+)\.proj\.weight', k): + stage_idx += 1 + k = re.sub(r'network.(\d+).(\d+)', f'stages.{stage_idx}.blocks.\\2', k) + k = re.sub(r'network.(\d+).proj', f'stages.{stage_idx}.downsample.conv', k) + k = re.sub(r'network.(\d+).norm', f'stages.{stage_idx}.downsample.norm', k) + + k = re.sub(r'layer_scale_([0-9])', r'ls\1.gamma', k) + k = k.replace('dist_head', 'head_dist') + out_dict[k] = v + return out_dict + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, + 'crop_pct': .95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': ('head', 'head_dist'), + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'efficientformer_l1.snap_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientformer_l3.snap_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientformer_l7.snap_dist_in1k': _cfg( + hf_hub_id='timm/', + ), +}) + + +def 
_create_efficientformer(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 4) + model = build_model_with_cfg( + EfficientFormer, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +@register_model +def efficientformer_l1(pretrained=False, **kwargs) -> EfficientFormer: + model_args = dict( + depths=EfficientFormer_depth['l1'], + embed_dims=EfficientFormer_width['l1'], + num_vit=1, + ) + return _create_efficientformer('efficientformer_l1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientformer_l3(pretrained=False, **kwargs) -> EfficientFormer: + model_args = dict( + depths=EfficientFormer_depth['l3'], + embed_dims=EfficientFormer_width['l3'], + num_vit=4, + ) + return _create_efficientformer('efficientformer_l3', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientformer_l7(pretrained=False, **kwargs) -> EfficientFormer: + model_args = dict( + depths=EfficientFormer_depth['l7'], + embed_dims=EfficientFormer_width['l7'], + num_vit=8, + ) + return _create_efficientformer('efficientformer_l7', pretrained=pretrained, **dict(model_args, **kwargs)) + diff --git a/pytorch-image-models/timm/models/efficientformer_v2.py b/pytorch-image-models/timm/models/efficientformer_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..dcf6499537af5d707d392554f417a80e537b7c49 --- /dev/null +++ b/pytorch-image-models/timm/models/efficientformer_v2.py @@ -0,0 +1,736 @@ +""" EfficientFormer-V2 + +@article{ + li2022rethinking, + title={Rethinking Vision Transformers for MobileNet Size and Speed}, + author={Li, Yanyu and Hu, Ju and Wen, Yang and Evangelidis, Georgios and Salahi, Kamyar and Wang, Yanzhi and Tulyakov, Sergey and Ren, Jian}, + journal={arXiv preprint arXiv:2212.08059}, + year={2022} +} + +Significantly refactored and cleaned up for timm from original at: https://github.com/snap-research/EfficientFormer + +Original code licensed Apache 2.0, Copyright (c) 2022 Snap Inc. 
+ +Modifications and timm support by / Copyright 2023, Ross Wightman +""" +import math +from functools import partial +from typing import Dict, Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import create_conv2d, create_norm_layer, get_act_layer, get_norm_layer, ConvNormAct +from timm.layers import DropPath, trunc_normal_, to_2tuple, to_ntuple, ndgrid +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + + +__all__ = ['EfficientFormerV2'] + +EfficientFormer_width = { + 'L': (40, 80, 192, 384), # 26m 83.3% 6attn + 'S2': (32, 64, 144, 288), # 12m 81.6% 4attn dp0.02 + 'S1': (32, 48, 120, 224), # 6.1m 79.0 + 'S0': (32, 48, 96, 176), # 75.0 75.7 +} + +EfficientFormer_depth = { + 'L': (5, 5, 15, 10), # 26m 83.3% + 'S2': (4, 4, 12, 8), # 12m + 'S1': (3, 3, 9, 6), # 79.0 + 'S0': (2, 2, 6, 4), # 75.7 +} + +EfficientFormer_expansion_ratios = { + 'L': (4, 4, (4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 4, 3, 3, 3, 3, 4, 4, 4)), + 'S2': (4, 4, (4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 3, 3, 3, 3, 4, 4)), + 'S1': (4, 4, (4, 4, 3, 3, 3, 3, 4, 4, 4), (4, 4, 3, 3, 4, 4)), + 'S0': (4, 4, (4, 3, 3, 3, 4, 4), (4, 3, 3, 4)), +} + + +class ConvNorm(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding='', + dilation=1, + groups=1, + bias=True, + norm_layer='batchnorm2d', + norm_kwargs=None, + ): + norm_kwargs = norm_kwargs or {} + super(ConvNorm, self).__init__() + self.conv = create_conv2d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + self.bn = create_norm_layer(norm_layer, out_channels, **norm_kwargs) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class Attention2d(torch.nn.Module): + attention_bias_cache: Dict[str, torch.Tensor] + + def __init__( + self, + dim=384, + key_dim=32, + num_heads=8, + attn_ratio=4, + resolution=7, + act_layer=nn.GELU, + stride=None, + ): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + + resolution = to_2tuple(resolution) + if stride is not None: + resolution = tuple([math.ceil(r / stride) for r in resolution]) + self.stride_conv = ConvNorm(dim, dim, kernel_size=3, stride=stride, groups=dim) + self.upsample = nn.Upsample(scale_factor=stride, mode='bilinear') + else: + self.stride_conv = None + self.upsample = None + + self.resolution = resolution + self.N = self.resolution[0] * self.resolution[1] + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + kh = self.key_dim * self.num_heads + + self.q = ConvNorm(dim, kh) + self.k = ConvNorm(dim, kh) + self.v = ConvNorm(dim, self.dh) + self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, groups=self.dh) + self.talking_head1 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) + self.talking_head2 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) + + self.act = act_layer() + self.proj = ConvNorm(self.dh, dim, 1) + + pos = torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) + rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] + self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, self.N)) + 
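# precomputed relative-position indices into the learned bias table, kept as a non-persistent buffer +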
self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos), persistent=False) + self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if torch.jit.is_tracing() or self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): + B, C, H, W = x.shape + if self.stride_conv is not None: + x = self.stride_conv(x) + + q = self.q(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) + k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) + v = self.v(x) + v_local = self.v_local(v) + v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) + + attn = (q @ k) * self.scale + attn = attn + self.get_attention_biases(x.device) + attn = self.talking_head1(attn) + attn = attn.softmax(dim=-1) + attn = self.talking_head2(attn) + + x = (attn @ v).transpose(2, 3) + x = x.reshape(B, self.dh, self.resolution[0], self.resolution[1]) + v_local + if self.upsample is not None: + x = self.upsample(x) + + x = self.act(x) + x = self.proj(x) + return x + + +class LocalGlobalQuery(torch.nn.Module): + def __init__(self, in_dim, out_dim): + super().__init__() + self.pool = nn.AvgPool2d(1, 2, 0) + self.local = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=2, padding=1, groups=in_dim) + self.proj = ConvNorm(in_dim, out_dim, 1) + + def forward(self, x): + local_q = self.local(x) + pool_q = self.pool(x) + q = local_q + pool_q + q = self.proj(q) + return q + + +class Attention2dDownsample(torch.nn.Module): + attention_bias_cache: Dict[str, torch.Tensor] + + def __init__( + self, + dim=384, + key_dim=16, + num_heads=8, + attn_ratio=4, + resolution=7, + out_dim=None, + act_layer=nn.GELU, + ): + super().__init__() + + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.resolution = to_2tuple(resolution) + self.resolution2 = tuple([math.ceil(r / 2) for r in self.resolution]) + self.N = self.resolution[0] * self.resolution[1] + self.N2 = self.resolution2[0] * self.resolution2[1] + + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + self.out_dim = out_dim or dim + kh = self.key_dim * self.num_heads + + self.q = LocalGlobalQuery(dim, kh) + self.k = ConvNorm(dim, kh, 1) + self.v = ConvNorm(dim, self.dh, 1) + self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, stride=2, groups=self.dh) + + self.act = act_layer() + self.proj = ConvNorm(self.dh, self.out_dim, 1) + + self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.N)) + k_pos = torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) + q_pos = torch.stack(ndgrid( + torch.arange(0, self.resolution[0], step=2), + torch.arange(0, self.resolution[1], step=2) + )).flatten(1) + rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] + self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) + self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel 
compat) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if torch.jit.is_tracing() or self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): + B, C, H, W = x.shape + + q = self.q(x).reshape(B, self.num_heads, -1, self.N2).permute(0, 1, 3, 2) + k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) + v = self.v(x) + v_local = self.v_local(v) + v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) + + attn = (q @ k) * self.scale + attn = attn + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(2, 3) + x = x.reshape(B, self.dh, self.resolution2[0], self.resolution2[1]) + v_local + x = self.act(x) + x = self.proj(x) + return x + + +class Downsample(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size=3, + stride=2, + padding=1, + resolution=7, + use_attn=False, + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + ): + super().__init__() + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + norm_layer = norm_layer or nn.Identity() + self.conv = ConvNorm( + in_chs, + out_chs, + kernel_size=kernel_size, + stride=stride, + padding=padding, + norm_layer=norm_layer, + ) + + if use_attn: + self.attn = Attention2dDownsample( + dim=in_chs, + out_dim=out_chs, + resolution=resolution, + act_layer=act_layer, + ) + else: + self.attn = None + + def forward(self, x): + out = self.conv(x) + if self.attn is not None: + return self.attn(x) + out + return out + + +class ConvMlpWithNorm(nn.Module): + """ + Implementation of MLP with 1*1 convolutions. 
+ Input: tensor with shape [B, C, H, W] + """ + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + drop=0., + mid_conv=False, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = ConvNormAct( + in_features, hidden_features, 1, + bias=True, norm_layer=norm_layer, act_layer=act_layer) + if mid_conv: + self.mid = ConvNormAct( + hidden_features, hidden_features, 3, + groups=hidden_features, bias=True, norm_layer=norm_layer, act_layer=act_layer) + else: + self.mid = nn.Identity() + self.drop1 = nn.Dropout(drop) + self.fc2 = ConvNorm(hidden_features, out_features, 1, norm_layer=norm_layer) + self.drop2 = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.mid(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class LayerScale2d(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma.view(1, -1, 1, 1) + return x.mul_(gamma) if self.inplace else x * gamma + + +class EfficientFormerV2Block(nn.Module): + def __init__( + self, + dim, + mlp_ratio=4., + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + proj_drop=0., + drop_path=0., + layer_scale_init_value=1e-5, + resolution=7, + stride=None, + use_attn=True, + ): + super().__init__() + + if use_attn: + self.token_mixer = Attention2d( + dim, + resolution=resolution, + act_layer=act_layer, + stride=stride, + ) + self.ls1 = LayerScale2d( + dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + else: + self.token_mixer = None + self.ls1 = None + self.drop_path1 = None + + self.mlp = ConvMlpWithNorm( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + norm_layer=norm_layer, + drop=proj_drop, + mid_conv=True, + ) + self.ls2 = LayerScale2d( + dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + if self.token_mixer is not None: + x = x + self.drop_path1(self.ls1(self.token_mixer(x))) + x = x + self.drop_path2(self.ls2(self.mlp(x))) + return x + + +class Stem4(nn.Sequential): + def __init__(self, in_chs, out_chs, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d): + super().__init__() + self.stride = 4 + self.conv1 = ConvNormAct( + in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1, bias=True, + norm_layer=norm_layer, act_layer=act_layer + ) + self.conv2 = ConvNormAct( + out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1, bias=True, + norm_layer=norm_layer, act_layer=act_layer + ) + + +class EfficientFormerV2Stage(nn.Module): + + def __init__( + self, + dim, + dim_out, + depth, + resolution=7, + downsample=True, + block_stride=None, + downsample_use_attn=False, + block_use_attn=False, + num_vit=1, + mlp_ratio=4., + proj_drop=.0, + drop_path=0., + layer_scale_init_value=1e-5, + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + + ): + super().__init__() + self.grad_checkpointing = False + mlp_ratio = to_ntuple(depth)(mlp_ratio) + resolution = to_2tuple(resolution) + + if downsample: + self.downsample = Downsample( + dim, + dim_out, + use_attn=downsample_use_attn, + resolution=resolution, + norm_layer=norm_layer, + act_layer=act_layer, + ) + dim = dim_out + resolution = tuple([math.ceil(r / 2) for r in resolution]) + else: + assert dim == dim_out + self.downsample = nn.Identity() + + blocks = [] + for block_idx in range(depth): + remain_idx = depth - num_vit - 1 + b = EfficientFormerV2Block( + dim, + resolution=resolution, + stride=block_stride, + mlp_ratio=mlp_ratio[block_idx], + use_attn=block_use_attn and block_idx > remain_idx, + proj_drop=proj_drop, + drop_path=drop_path[block_idx], + layer_scale_init_value=layer_scale_init_value, + act_layer=act_layer, + norm_layer=norm_layer, + ) + blocks += [b] + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class EfficientFormerV2(nn.Module): + def __init__( + self, + depths, + in_chans=3, + img_size=224, + global_pool='avg', + embed_dims=None, + downsamples=None, + mlp_ratios=4, + norm_layer='batchnorm2d', + norm_eps=1e-5, + act_layer='gelu', + num_classes=1000, + drop_rate=0., + proj_drop_rate=0., + drop_path_rate=0., + layer_scale_init_value=1e-5, + num_vit=0, + distillation=True, + ): + super().__init__() + assert global_pool in ('avg', '') + self.num_classes = num_classes + self.global_pool = global_pool + self.feature_info = [] + img_size = to_2tuple(img_size) + norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) + act_layer = get_act_layer(act_layer) + + self.stem = Stem4(in_chans, embed_dims[0], act_layer=act_layer, norm_layer=norm_layer) + prev_dim = embed_dims[0] + stride = 4 + + num_stages = len(depths) + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + downsamples = downsamples or (False,) + (True,) * (len(depths) - 1) + mlp_ratios = to_ntuple(num_stages)(mlp_ratios) + stages = [] + for i in range(num_stages): + curr_resolution = tuple([math.ceil(s / stride) for s in img_size]) + stage = EfficientFormerV2Stage( + prev_dim, + embed_dims[i], + depth=depths[i], + resolution=curr_resolution, + downsample=downsamples[i], + block_stride=2 if i == 2 else None, + downsample_use_attn=i >= 3, + block_use_attn=i >= 2, + num_vit=num_vit, + 
mlp_ratio=mlp_ratios[i], + proj_drop=proj_drop_rate, + drop_path=dpr[i], + layer_scale_init_value=layer_scale_init_value, + act_layer=act_layer, + norm_layer=norm_layer, + ) + if downsamples[i]: + stride *= 2 + prev_dim = embed_dims[i] + self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{i}')] + stages.append(stage) + self.stages = nn.Sequential(*stages) + + # Classifier head + self.num_features = self.head_hidden_size = embed_dims[-1] + self.norm = norm_layer(embed_dims[-1]) + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() + self.dist = distillation + if self.dist: + self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() + else: + self.head_dist = None + + self.apply(self.init_weights) + self.distilled_training = False + + # init for classification + def init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay(self): + return {k for k, _ in self.named_parameters() if 'attention_biases' in k} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', # stem and embed + blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head, self.head_dist + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=(2, 3)) + x = self.head_drop(x) + if pre_logits: + return x + x, x_dist = self.head(x), self.head_dist(x) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train/finetune, inference average the classifier predictions + return (x + x_dist) / 2 + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, + 'crop_pct': .95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'classifier': ('head', 'head_dist'), 'first_conv': 'stem.conv1.conv', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'efficientformerv2_s0.snap_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientformerv2_s1.snap_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientformerv2_s2.snap_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientformerv2_l.snap_dist_in1k': _cfg( + hf_hub_id='timm/', + ), +}) + + +def _create_efficientformerv2(variant, pretrained=False, 
**kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) + model = build_model_with_cfg( + EfficientFormerV2, variant, pretrained, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + return model + + +@register_model +def efficientformerv2_s0(pretrained=False, **kwargs) -> EfficientFormerV2: + model_args = dict( + depths=EfficientFormer_depth['S0'], + embed_dims=EfficientFormer_width['S0'], + num_vit=2, + drop_path_rate=0.0, + mlp_ratios=EfficientFormer_expansion_ratios['S0'], + ) + return _create_efficientformerv2('efficientformerv2_s0', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientformerv2_s1(pretrained=False, **kwargs) -> EfficientFormerV2: + model_args = dict( + depths=EfficientFormer_depth['S1'], + embed_dims=EfficientFormer_width['S1'], + num_vit=2, + drop_path_rate=0.0, + mlp_ratios=EfficientFormer_expansion_ratios['S1'], + ) + return _create_efficientformerv2('efficientformerv2_s1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientformerv2_s2(pretrained=False, **kwargs) -> EfficientFormerV2: + model_args = dict( + depths=EfficientFormer_depth['S2'], + embed_dims=EfficientFormer_width['S2'], + num_vit=4, + drop_path_rate=0.02, + mlp_ratios=EfficientFormer_expansion_ratios['S2'], + ) + return _create_efficientformerv2('efficientformerv2_s2', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientformerv2_l(pretrained=False, **kwargs) -> EfficientFormerV2: + model_args = dict( + depths=EfficientFormer_depth['L'], + embed_dims=EfficientFormer_width['L'], + num_vit=6, + drop_path_rate=0.1, + mlp_ratios=EfficientFormer_expansion_ratios['L'], + ) + return _create_efficientformerv2('efficientformerv2_l', pretrained=pretrained, **dict(model_args, **kwargs)) + diff --git a/pytorch-image-models/timm/models/efficientnet.py b/pytorch-image-models/timm/models/efficientnet.py new file mode 100644 index 0000000000000000000000000000000000000000..07bb250c84efbce84072135c9808214da90a742b --- /dev/null +++ b/pytorch-image-models/timm/models/efficientnet.py @@ -0,0 +1,2860 @@ +""" The EfficientNet Family in PyTorch + +An implementation of EfficienNet that covers variety of related models with efficient architectures: + +* EfficientNet-V2 + - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + +* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) + - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 + - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 + - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 + - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 + +* MixNet (Small, Medium, and Large) + - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 + +* MNasNet B1, A1 (SE), Small + - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 + +* FBNet-C + - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 + +* Single-Path NAS Pixel1 + - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 + +* TinyNet + - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 + - Definitions & 
weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch + +* And likely more... + +The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available +by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing +the models and weights open source! + +Hacked together by / Copyright 2019, Ross Wightman +""" +from functools import partial +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, LayerType, \ + GroupNormAct, LayerNormAct2d, EvoNorm2dS0 +from ._builder import build_model_with_cfg, pretrained_cfg_for_features +from ._efficientnet_blocks import SqueezeExcite +from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from ._features import FeatureInfo, FeatureHooks, feature_take_indices +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['EfficientNet', 'EfficientNetFeatures'] + + +class EfficientNet(nn.Module): + """ EfficientNet + + A flexible and performant PyTorch implementation of efficient network architectures, including: + * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 + * EfficientNet B0-B8, L2 + * EfficientNet-EdgeTPU + * EfficientNet-CondConv + * MixNet S, M, L, XL + * MnasNet A1, B1, and small + * MobileNet-V2 + * FBNet C + * Single-Path NAS Pixel1 + * TinyNet + """ + + def __init__( + self, + block_args: BlockArgs, + num_classes: int = 1000, + num_features: int = 1280, + in_chans: int = 3, + stem_size: int = 32, + stem_kernel_size: int = 3, + fix_stem: bool = False, + output_stride: int = 32, + pad_type: str = '', + act_layer: Optional[LayerType] = None, + norm_layer: Optional[LayerType] = None, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[LayerType] = None, + round_chs_fn: Callable = round_channels, + drop_rate: float = 0., + drop_path_rate: float = 0., + global_pool: str = 'avg' + ): + super(EfficientNet, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type) + self.bn1 = norm_act_layer(stem_size, inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, + pad_type=pad_type, + round_chs_fn=round_chs_fn, + act_layer=act_layer, + norm_layer=norm_layer, + aa_layer=aa_layer, + se_layer=se_layer, + drop_path_rate=drop_path_rate, + ) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + self.stage_ends = [f['stage'] for f in self.feature_info] + head_chs = builder.in_chs + + # Head + Pooling + if num_features > 0: + self.conv_head = create_conv2d(head_chs, num_features, 1, padding=pad_type) + 
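+            # norm + activation for the 1x1 head conv output, applied before global pooling / classifier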
self.bn2 = norm_act_layer(num_features, inplace=True) + self.num_features = self.head_hidden_size = num_features + else: + self.conv_head = nn.Identity() + self.bn2 = nn.Identity() + self.num_features = self.head_hidden_size = head_chs + + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1] + layers.extend(self.blocks) + layers.extend([self.conv_head, self.bn2, self.global_pool]) + layers.extend([nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^conv_stem|bn1', + blocks=[ + (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), + (r'conv_head|bn2', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classifier + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + extra_blocks: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + if extra_blocks: + take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) + else: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + take_indices = [self.stage_ends[i] for i in take_indices] + max_index = self.stage_ends[max_index] + # forward pass + feat_idx = 0 # stem is index 0 + x = self.conv_stem(x) + x = self.bn1(x) + if feat_idx in take_indices: + intermediates.append(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index] + for blk in blocks: + feat_idx += 1 + x = blk(x) + if feat_idx in take_indices: + intermediates.append(x) + + if intermediates_only: + return intermediates + + if feat_idx == self.stage_ends[-1]: + x = self.conv_head(x) + x = self.bn2(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + extra_blocks: bool = False, + ): + """ Prune layers not required for specified intermediates. 
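+
+        Args:
+            indices: Take last n blocks if int, all if None, select matching indices if sequence
+            prune_norm: Also replace the head conv + norm (conv_head, bn2) with identity
+            prune_head: Reset the classifier head (num_classes=0)
+            extra_blocks: Index over all blocks (stem as idx 0) rather than feature_info stages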
+ """ + if extra_blocks: + take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) + else: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + max_index = self.stage_ends[max_index] + self.blocks = self.blocks[:max_index] # truncate blocks w/ stem as idx 0 + if prune_norm or max_index < len(self.blocks): + self.conv_head = nn.Identity() + self.bn2 = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x, flatten=True) + else: + x = self.blocks(x) + x = self.conv_head(x) + x = self.bn2(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.classifier(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +class EfficientNetFeatures(nn.Module): + """ EfficientNet Feature Extractor + + A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation + and object detection models. + """ + + def __init__( + self, + block_args: BlockArgs, + out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4), + feature_location: str = 'bottleneck', + in_chans: int = 3, + stem_size: int = 32, + stem_kernel_size: int = 3, + fix_stem: bool = False, + output_stride: int = 32, + pad_type: str = '', + act_layer: Optional[LayerType] = None, + norm_layer: Optional[LayerType] = None, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[LayerType] = None, + round_chs_fn: Callable = round_channels, + drop_rate: float = 0., + drop_path_rate: float = 0., + ): + super(EfficientNetFeatures, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + self.grad_checkpointing = False + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type) + self.bn1 = norm_act_layer(stem_size, inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, + pad_type=pad_type, + round_chs_fn=round_chs_fn, + act_layer=act_layer, + norm_layer=norm_layer, + aa_layer=aa_layer, + se_layer=se_layer, + drop_path_rate=drop_path_rate, + feature_location=feature_location, + ) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + if 
self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(b, x) + else: + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_effnet(variant, pretrained=False, **kwargs): + features_mode = '' + model_cls = EfficientNet + kwargs_filter = None + if kwargs.pop('features_only', False): + if 'feature_cfg' in kwargs or 'feature_cls' in kwargs: + features_mode = 'cfg' + else: + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') + model_cls = EfficientNetFeatures + features_mode = 'cls' + + model = build_model_with_cfg( + model_cls, + variant, + pretrained, + features_only=features_mode == 'cfg', + pretrained_strict=features_mode != 'cls', + kwargs_filter=kwargs_filter, + **kwargs, + ) + if features_mode == 'cls': + model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg) + return model + + +def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-a1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r2_k3_s2_e6_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r4_k3_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r3_k5_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v1( + variant, channel_multiplier=1.0, depth_multiplier=1.0, + group_size=None, fix_stem_head=False, head_conv=False, pretrained=False, **kwargs +): + """ + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['dsa_r1_k3_s1_c64'], + ['dsa_r2_k3_s2_c128'], + ['dsa_r2_k3_s2_c256'], + ['dsa_r6_k3_s2_c512'], + ['dsa_r2_k3_s2_c1024'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + head_features = (1024 if fix_stem_head else max(1024, round_chs_fn(1024))) if head_conv else 0 + model_kwargs = dict( + block_args=decode_arch_def( + arch_def, + depth_multiplier=depth_multiplier, + fix_first_last=fix_stem_head, + group_size=group_size, + ), + num_features=head_features, + stem_size=32, + fix_stem=fix_stem_head, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu6'), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, + group_size=None, fix_stem_head=False, pretrained=False, **kwargs +): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def( + arch_def, + depth_multiplier=depth_multiplier, + fix_first_last=fix_stem_head, + group_size=group_size, + ), + num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)), + stem_size=32, + fix_stem=fix_stem_head, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu6'), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 
'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but is not 100% clear + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, + group_size=None, pretrained=False, **kwargs +): + """Creates an EfficientNet model. 
+ + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_edge( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs +): + """ Creates an EfficientNet-EdgeTPU model + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu + """ + + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_condconv( + variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs +): + """Creates an EfficientNet-CondConv model. 
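+    Paper: `CondConv: Conditionally Parameterized Convolutions for Efficient Inference` - https://arxiv.org/abs/1904.04971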
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + # NOTE unlike official impl, this one uses `cc` option where x is the base number of experts for each stage and + # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'swish'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + act_layer=resolve_act_layer(kwargs, 'relu6'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_base( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs +): + """ Creates an EfficientNet-V2 base model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + arch_def = [ + ['cn_r1_k3_s1_e1_c16_skip'], + ['er_r2_k3_s2_e4_c32'], + ['er_r2_k3_s2_e4_c48'], + ['ir_r3_k3_s2_e4_c96_se0.25'], + ['ir_r5_k3_s1_e6_c112_se0.25'], + ['ir_r8_k3_s2_e6_c192_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) 
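+    # round_limit=0. disables round_channels' usual lower-bound guard (by default it bumps the
+    # rounded value up by one divisor if rounding to a multiple of 8 loses more than ~10% of the
+    # requested width); e.g. with a hypothetical 0.4 multiplier, 24 channels round down to 8 here
+    # instead of being bumped back up to 16.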
+ model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_s( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs +): + """ Creates an EfficientNet-V2 Small model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + + NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, + before ref the impl was released. + """ + arch_def = [ + ['cn_r2_k3_s1_e1_c24_skip'], + ['er_r4_k3_s2_e4_c48'], + ['er_r4_k3_s2_e4_c64'], + ['ir_r6_k3_s2_e4_c128_se0.25'], + ['ir_r9_k3_s1_e6_c160_se0.25'], + ['ir_r15_k3_s2_e6_c256_se0.25'], + ] + num_features = 1280 + if rw: + # my original variant, based on paper figure differs from the official release + arch_def[0] = ['er_r2_k3_s1_e1_c24'] + arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] + num_features = 1792 + + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(num_features), + stem_size=24, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_m( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs +): + """ Creates an EfficientNet-V2 Medium model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r3_k3_s1_e1_c24_skip'], + ['er_r5_k3_s2_e4_c48'], + ['er_r5_k3_s2_e4_c80'], + ['ir_r7_k3_s2_e4_c160_se0.25'], + ['ir_r14_k3_s1_e6_c176_se0.25'], + ['ir_r18_k3_s2_e6_c304_se0.25'], + ['ir_r5_k3_s1_e6_c512_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=1280, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_l( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs +): + """ Creates an EfficientNet-V2 Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r7_k3_s2_e4_c64'], + ['er_r7_k3_s2_e4_c96'], + ['ir_r10_k3_s2_e4_c192_se0.25'], + ['ir_r19_k3_s1_e6_c224_se0.25'], + ['ir_r25_k3_s2_e6_c384_se0.25'], + ['ir_r7_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + 
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_xl( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs +): + """ Creates an EfficientNet-V2 Xtra-Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r8_k3_s2_e4_c64'], + ['er_r8_k3_s2_e4_c96'], + ['ir_r16_k3_s2_e4_c192_se0.25'], + ['ir_r24_k3_s1_e6_c256_se0.25'], + ['ir_r32_k3_s2_e6_c512_se0.25'], + ['ir_r8_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_x( + variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, + group_size=None, version=1, pretrained=False, **kwargs +): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-x-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-x-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-x-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-x-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-x-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-x-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-x-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-x-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-x-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + """ + if version == 1: + blocks_args = [ + 'r1_k3_s11_e1_i32_o16_se0.25_d1_a0', + 'r2_k3_s22_e6_i16_o24_se0.25_f1_d2_a1', + 'r2_k5_s22_e6_i24_o40_se0.25_f1_a1', + 'r3_k3_s22_e6_i40_o80_se0.25_a0', + 'r3_k5_s11_e6_i80_o112_se0.25_a0', + 'r4_k5_s22_e6_i112_o192_se0.25_a0', + 'r1_k3_s11_e6_i192_o320_se0.25_a0', + ] + elif version == 2: + blocks_args = [ + 'r1_k3_s11_e1_i32_o16_se0.25_d1_a0', + 'r2_k3_s22_e4_i16_o24_se0.25_f1_d2_a1', + 'r2_k5_s22_e4_i24_o40_se0.25_f1_a1', + 'r3_k3_s22_e4_i40_o80_se0.25_a0', + 'r3_k5_s11_e6_i80_o112_se0.25_a0', + 'r4_k5_s22_e6_i112_o192_se0.25_a0', + 'r1_k3_s11_e6_i192_o320_se0.25_a0', + ] + """ + if version == 1: + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25_d1'], + ['er_r2_k3_s2_e6_c24_se0.25_nre'], + ['er_r2_k5_s2_e6_c40_se0.25_nre'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + else: + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25_d1'], + ['er_r2_k3_s2_e4_c24_se0.25_nre'], + 
['er_r2_k5_s2_e4_c40_se0.25_nre'], + ['ir_r3_k3_s2_e4_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + act_layer=resolve_act_layer(kwargs, 'silu'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_tinynet(variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a TinyNet model. 
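+
+    Paper: `Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets` - https://arxiv.org/abs/2010.14819
+    Defs & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch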
+ """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=max(1280, round_channels(1280, model_width, 8, None)), + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=model_width), + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_edgetpu(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ + Based on definitions in: https://github.com/tensorflow/models/tree/d2427a562f401c9af118e47af2f030a0a5599f55/official/projects/edgetpu/vision + """ + if 'edgetpu_v2' in variant: + stem_size = 64 + stem_kernel_size = 5 + group_size = 64 + num_features = 1280 + act_layer = resolve_act_layer(kwargs, 'relu') + + def _arch_def(chs: List[int], group_size: int): + return [ + # stage 0, 112x112 in + [f'cn_r1_k1_s1_c{chs[0]}'], # NOTE with expansion==1, official impl block ends just 1x1 pwl + # stage 1, 112x112 in + [f'er_r1_k3_s2_e8_c{chs[1]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[1]}'], + # stage 2, 56x56 in + [ + f'er_r1_k3_s2_e8_c{chs[2]}', + f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}', + f'er_r1_k3_s1_e4_c{chs[2]}', + f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}', + ], + # stage 3, 28x28 in + [f'er_r1_k3_s2_e8_c{chs[3]}', f'ir_r3_k3_s1_e4_c{chs[3]}'], + # stage 4, 14x14in + [f'ir_r1_k3_s1_e8_c{chs[4]}', f'ir_r3_k3_s1_e4_c{chs[4]}'], + # stage 5, 14x14in + [f'ir_r1_k3_s2_e8_c{chs[5]}', f'ir_r3_k3_s1_e4_c{chs[5]}'], + # stage 6, 7x7 in + [f'ir_r1_k3_s1_e8_c{chs[6]}'], + ] + + if 'edgetpu_v2_xs' in variant: + stem_size = 32 + stem_kernel_size = 3 + channels = [16, 32, 48, 96, 144, 160, 192] + elif 'edgetpu_v2_s' in variant: + channels = [24, 48, 64, 128, 160, 192, 256] + elif 'edgetpu_v2_m' in variant: + channels = [32, 64, 80, 160, 192, 240, 320] + num_features = 1344 + elif 'edgetpu_v2_l' in variant: + stem_kernel_size = 7 + group_size = 128 + channels = [32, 64, 96, 192, 240, 256, 384] + num_features = 1408 + else: + assert False + + arch_def = _arch_def(channels, group_size) + else: + # v1 + stem_size = 32 + stem_kernel_size = 3 + num_features = 1280 + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['cn_r1_k1_s1_c16'], + # stage 1, 112x112 in + ['er_r1_k3_s2_e8_c32', 'er_r3_k3_s1_e4_c32'], + # stage 2, 56x56 in + ['er_r1_k3_s2_e8_c48', 'er_r3_k3_s1_e4_c48'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e8_c96', 'ir_r3_k3_s1_e4_c96'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e8_c96_noskip', 'ir_r3_k3_s1_e4_c96'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e8_c160', 'ir_r3_k5_s1_e4_c160'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e8_c192'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=num_features, + stem_size=stem_size, + stem_kernel_size=stem_kernel_size, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def 
_gen_test_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Minimal test EfficientNet generator. + """ + arch_def = [ + ['cn_r1_k3_s1_e1_c16_skip'], + ['er_r1_k3_s2_e4_c24'], + ['er_r1_k3_s2_e4_c32'], + ['ir_r1_k3_s2_e4_c48_se0.25'], + ['ir_r1_k3_s2_e4_c64_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(256), + stem_size=24, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'mnasnet_050.untrained': _cfg(), + 'mnasnet_075.untrained': _cfg(), + 'mnasnet_100.rmsp_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', + hf_hub_id='timm/'), + 'mnasnet_140.untrained': _cfg(), + + 'semnasnet_050.untrained': _cfg(), + 'semnasnet_075.rmsp_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth', + hf_hub_id='timm/'), + 'semnasnet_100.rmsp_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', + hf_hub_id='timm/'), + 'semnasnet_140.untrained': _cfg(), + 'mnasnet_small.lamb_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth', + hf_hub_id='timm/'), + + 'mobilenetv1_100.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + test_input_size=(3, 256, 256), test_crop_pct=0.95, + ), + 'mobilenetv1_100h.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + test_input_size=(3, 256, 256), test_crop_pct=0.95, + ), + 'mobilenetv1_125.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0, + ), + + 'mobilenetv2_035.untrained': _cfg(), + 'mobilenetv2_050.lamb_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth', + hf_hub_id='timm/', + interpolation='bicubic', + ), + 'mobilenetv2_075.untrained': _cfg(), + 'mobilenetv2_100.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth', + hf_hub_id='timm/'), + 'mobilenetv2_110d.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth', + hf_hub_id='timm/'), + 'mobilenetv2_120d.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth', + hf_hub_id='timm/'), + 'mobilenetv2_140.ra_in1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth', + hf_hub_id='timm/'), + + 'fbnetc_100.rmsp_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + hf_hub_id='timm/', + interpolation='bilinear'), + 'spnasnet_100.rmsp_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + hf_hub_id='timm/', + interpolation='bilinear'), + + # NOTE experimenting with alternate attention + 'efficientnet_b0.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth', + hf_hub_id='timm/'), + 'efficientnet_b0.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0), + 'efficientnet_b1.ra4_e3600_r240_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), crop_pct=0.9, pool_size=(8, 8), + test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'efficientnet_b1.ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + hf_hub_id='timm/', + test_input_size=(3, 256, 256), test_crop_pct=1.0), + 'efficientnet_b2.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'efficientnet_b3.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', + hf_hub_id='timm/', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0), + 'efficientnet_b4.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', + hf_hub_id='timm/', + input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0), + 'efficientnet_b5.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'), + 'efficientnet_b5.sw_in12k': _cfg( + hf_hub_id='timm/', + input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821), + 'efficientnet_b6.untrained': _cfg( + url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'efficientnet_b7.untrained': _cfg( + url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'efficientnet_b8.untrained': _cfg( + url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + 'efficientnet_l2.untrained': _cfg( + url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), + + # FIXME experimental + 'efficientnet_b0_gn.untrained': _cfg(), + 'efficientnet_b0_g8_gn.untrained': _cfg(), + 'efficientnet_b0_g16_evos.untrained': _cfg(), + 'efficientnet_b3_gn.untrained': _cfg( + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_b3_g8_gn.untrained': _cfg( + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_blur_b0.untrained': _cfg(), + + 'efficientnet_es.ra_in1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth', + hf_hub_id='timm/'), + 'efficientnet_em.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', + hf_hub_id='timm/', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_el.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth', + hf_hub_id='timm/', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_es_pruned.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth', + hf_hub_id='timm/'), + 'efficientnet_el_pruned.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth', + hf_hub_id='timm/', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_cc_b0_4e.untrained': _cfg(), + 'efficientnet_cc_b0_8e.untrained': _cfg(), + 'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'efficientnet_lite0.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth', + hf_hub_id='timm/'), + 'efficientnet_lite1.untrained': _cfg( + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_lite2.untrained': _cfg( + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'efficientnet_lite3.untrained': _cfg( + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_lite4.untrained': _cfg( + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + + 'efficientnet_b1_pruned.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth', + hf_hub_id='timm/', + input_size=(3, 240, 240), pool_size=(8, 8), + crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b2_pruned.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth', + hf_hub_id='timm/', + input_size=(3, 260, 260), pool_size=(9, 9), + crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b3_pruned.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth', + hf_hub_id='timm/', + input_size=(3, 300, 300), pool_size=(10, 10), + crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'efficientnetv2_rw_t.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', + hf_hub_id='timm/', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'gc_efficientnetv2_rw_t.agc_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', + hf_hub_id='timm/', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'efficientnetv2_rw_s.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', + hf_hub_id='timm/', + input_size=(3, 288, 
288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_rw_m.agc_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', + hf_hub_id='timm/', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + + 'efficientnetv2_s.untrained': _cfg( + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_m.untrained': _cfg( + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + 'efficientnetv2_l.untrained': _cfg( + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'efficientnetv2_xl.untrained': _cfg( + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnet_b0.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + hf_hub_id='timm/', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + hf_hub_id='timm/', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + hf_hub_id='timm/', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + hf_hub_id='timm/', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + hf_hub_id='timm/', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + hf_hub_id='timm/', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + hf_hub_id='timm/', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + hf_hub_id='timm/', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_l2.ns_jft_in1k_475': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + hf_hub_id='timm/', + input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), + 'tf_efficientnet_l2.ns_jft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + hf_hub_id='timm/', + input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), + + 'tf_efficientnet_b0.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + 
hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), + 'tf_efficientnet_b1.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8.ap_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b5.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + hf_hub_id='timm/', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b7.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + hf_hub_id='timm/', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + hf_hub_id='timm/', + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0.aa_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + hf_hub_id='timm/', + input_size=(3, 224, 224)), + 
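+    # The keys above follow timm's '<model_name>.<pretrained_tag>' convention; tags such as
+    # 'aa_in1k', 'ap_in1k', 'ns_jft_in1k' and 'ra_in1k' roughly identify the training recipe and
+    # data (AutoAugment, AdvProp, Noisy Student w/ JFT, timm RandAugment recipes, ImageNet-1k).
+    # A minimal usage sketch, assuming the weights referenced here are reachable via the HF hub:
+    #   import timm
+    #   model = timm.create_model('tf_efficientnet_b0.aa_in1k', pretrained=True)
+    # input_size / crop_pct describe train-time preprocessing, while test_input_size /
+    # test_crop_pct (where present) are the suggested higher-resolution eval settings.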
'tf_efficientnet_b1.aa_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + hf_hub_id='timm/', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2.aa_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + hf_hub_id='timm/', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3.aa_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + hf_hub_id='timm/', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4.aa_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + hf_hub_id='timm/', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5.aa_in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth', + hf_hub_id='timm/', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6.aa_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + hf_hub_id='timm/', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7.aa_in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth', + hf_hub_id='timm/', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + + 'tf_efficientnet_b0.in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth', + hf_hub_id='timm/', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1.in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth', + hf_hub_id='timm/', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2.in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth', + hf_hub_id='timm/', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3.in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth', + hf_hub_id='timm/', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4.in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth', + hf_hub_id='timm/', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5.in1k': _cfg( + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth', + hf_hub_id='timm/', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + + 'tf_efficientnet_es.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), ), + 'tf_efficientnet_em.in1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_el.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'tf_efficientnet_cc_b0_4e.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b0_8e.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b1_8e.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'tf_efficientnet_lite0.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite1.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite2.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite3.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), + 'tf_efficientnet_lite4.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), + + 'tf_efficientnetv2_s.in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m.in21k_ft_in1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'tf_efficientnetv2_l.in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + 'tf_efficientnetv2_s.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'tf_efficientnetv2_l.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + 'tf_efficientnetv2_s.in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m.in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'tf_efficientnetv2_l.in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'tf_efficientnetv2_xl.in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + 'tf_efficientnetv2_b0.in1k': 
_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', + hf_hub_id='timm/', + input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), + 'tf_efficientnetv2_b1.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', + hf_hub_id='timm/', + input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), + 'tf_efficientnetv2_b2.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', + hf_hub_id='timm/', + input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), + 'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'), + 'tf_efficientnetv2_b3.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', + hf_hub_id='timm/', + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), + 'tf_efficientnetv2_b3.in21k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=21843, + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), + + 'mixnet_s.ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth', + hf_hub_id='timm/'), + 'mixnet_m.ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth', + hf_hub_id='timm/'), + 'mixnet_l.ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth', + hf_hub_id='timm/'), + 'mixnet_xl.ra_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth', + hf_hub_id='timm/'), + 'mixnet_xxl.untrained': _cfg(), + + 'tf_mixnet_s.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth', + hf_hub_id='timm/'), + 'tf_mixnet_m.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth', + hf_hub_id='timm/'), + 'tf_mixnet_l.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth', + hf_hub_id='timm/'), + + "tinynet_a.in1k": _cfg( + input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth', + hf_hub_id='timm/'), + "tinynet_b.in1k": _cfg( + input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth', + hf_hub_id='timm/'), + "tinynet_c.in1k": _cfg( + input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth', + hf_hub_id='timm/'), + "tinynet_d.in1k": _cfg( + input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth', + 
hf_hub_id='timm/'), + "tinynet_e.in1k": _cfg( + input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth', + hf_hub_id='timm/'), + + 'mobilenet_edgetpu_100.untrained': _cfg( + # hf_hub_id='timm/', + input_size=(3, 224, 224), crop_pct=0.9), + 'mobilenet_edgetpu_v2_xs.untrained': _cfg( + # hf_hub_id='timm/', + input_size=(3, 224, 224), crop_pct=0.9), + 'mobilenet_edgetpu_v2_s.untrained': _cfg( + #hf_hub_id='timm/', + input_size=(3, 224, 224), crop_pct=0.9), + 'mobilenet_edgetpu_v2_m.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=0.95, + ), + 'mobilenet_edgetpu_v2_l.untrained': _cfg( + #hf_hub_id='timm/', + input_size=(3, 224, 224), crop_pct=0.9), + + "test_efficientnet.r160_in1k": _cfg( + hf_hub_id='timm/', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), + "test_efficientnet_ln.r160_in1k": _cfg( + hf_hub_id='timm/', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), + "test_efficientnet_gn.r160_in1k": _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), + "test_efficientnet_evos.r160_in1k": _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), +}) + + +@register_model +def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet: + """ MNASNet Small, depth multiplier of 1.0. 
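+    A minimal usage sketch (illustrative), relying on the 'lamb_in1k' weights listed in default_cfgs above:
+        import timm
+        model = timm.create_model('mnasnet_small', pretrained=True)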
""" + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv1_100(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V1 """ + model = _gen_mobilenet_v1('mobilenetv1_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv1_100h(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V1 """ + model = _gen_mobilenet_v1('mobilenetv1_100h', 1.0, head_conv=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv1_125(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V1 """ + model = _gen_mobilenet_v1('mobilenetv1_125', 1.25, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V2 w/ 0.35 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V2 w/ 0.5 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V2 w/ 0.75 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet: + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet: + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate 
should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B5 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B6 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B7 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B8 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-L2.""" + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +# FIXME experimental group conv / GroupNorm / EvoNorm experiments +@register_model +def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B0 + GroupNorm""" + model = _gen_efficientnet( + 'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B0 w/ group conv + GroupNorm""" + model = _gen_efficientnet( + 'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8), + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> 
EfficientNet: + """ EfficientNet-B0 w/ group 16 conv + EvoNorm""" + model = _gen_efficientnet( + 'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16, + pretrained=pretrained, **kwargs) #norm_layer=partial(EvoNorm2dS0, group_size=16), + return model + + +@register_model +def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B3 w/ GroupNorm """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16, + norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B3 w/ grouped conv + GroupNorm""" + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16, + norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_blur_b0(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B0 w/ BlurPool """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_blur_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, + aa_layer='blurpc', **kwargs + ) + return model + + +@register_model +def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge Small. """ + model = _gen_efficientnet_edge( + 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge-Medium. """ + model = _gen_efficientnet_edge( + 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge-Large. """ + model = _gen_efficientnet_edge( + 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge-Large pruned. 
For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-CondConv-B0 w/ 4 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-CondConv-B1 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B1 Pruned. 
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + variant = 'efficientnet_b1_pruned' + model = _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) + return model + + +@register_model +def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, + rw=False, se_layer='gc', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Small (RW variant). + NOTE: This is my initial (pre official code release) w/ some differences. + See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding + """ + model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Medium (RW variant). + """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Small. """ + model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Medium. """ + model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Large. """ + model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Xtra-Large. 
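+    A minimal usage sketch (illustrative); no pretrained weights are listed for this variant in
+    default_cfgs above, so it is built with random init:
+        import timm
+        model = timm.create_model('efficientnetv2_xl', pretrained=False, num_classes=10)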
""" + model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B0. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B1. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B2. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B3. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B4. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B5. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B6. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B7. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B8. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet( + 'tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Small. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Medium. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Large. 
Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2 Xtra-Large. Tensorflow compatible variant + """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2-B0. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2-B1. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2-B2. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-V2-B3. Tensorflow compatible variant """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_x_b3(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_x( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_x_b5(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B5 """ + model = _gen_efficientnet_x( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_h_b5(pretrained=False, **kwargs) -> EfficientNet: + """ EfficientNet-B5 """ + model = _gen_efficientnet_x( + 'efficientnet_b5', channel_multiplier=1.92, depth_multiplier=2.2, version=2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_s(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Small model. + """ + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_m(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Medium model. 
+ """ + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_l(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Large model. + """ + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_s(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Medium model. Tensorflow compatible variant + """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet: + """Creates a MixNet Large model. Tensorflow compatible variant + """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_a(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_b(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_c(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_d(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_e(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenet_edgetpu_100(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet-EdgeTPU-v1 100. """ + model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_100', pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenet_edgetpu_v2_xs(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet-EdgeTPU-v2 Extra Small. 
""" + model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_xs', pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenet_edgetpu_v2_s(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet-EdgeTPU-v2 Small. """ + model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenet_edgetpu_v2_m(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet-EdgeTPU-v2 Medium. """ + model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenet_edgetpu_v2_l(pretrained=False, **kwargs) -> EfficientNet: + """ MobileNet-EdgeTPU-v2 Large. """ + model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def test_efficientnet(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_test_efficientnet('test_efficientnet', pretrained=pretrained, **kwargs) + return model + + +@register_model +def test_efficientnet_gn(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_test_efficientnet( + 'test_efficientnet_gn', pretrained=pretrained, norm_layer=partial(GroupNormAct, group_size=8), **kwargs) + return model + + +@register_model +def test_efficientnet_ln(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_test_efficientnet( + 'test_efficientnet_ln', pretrained=pretrained, norm_layer=LayerNormAct2d, **kwargs) + return model + + +@register_model +def test_efficientnet_evos(pretrained=False, **kwargs) -> EfficientNet: + model = _gen_test_efficientnet( + 'test_efficientnet_evos', pretrained=pretrained, norm_layer=partial(EvoNorm2dS0, group_size=8), **kwargs) + return model + + +register_model_deprecations(__name__, { + 'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k', + 'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k', + 'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k', + 'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k', + 'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k', + 'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k', + 'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k', + 'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k', + 'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k', + 'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k', + 'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k', + 'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k', + 'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k', + 'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k', + 'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k', + 'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k', + 'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k', + 'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475', + 'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k', + 'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k', + 'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k', + 'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k', + 'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k', + 'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k', + 'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k', + 'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k', + 'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k', + 'efficientnet_b2a': 'efficientnet_b2', + 'efficientnet_b3a': 'efficientnet_b3', + 'mnasnet_a1': 
'semnasnet_100', + 'mnasnet_b1': 'mnasnet_100', +}) diff --git a/pytorch-image-models/timm/models/efficientvit_mit.py b/pytorch-image-models/timm/models/efficientvit_mit.py new file mode 100644 index 0000000000000000000000000000000000000000..34be806b1ee66559864da6902b1ce785daad5291 --- /dev/null +++ b/pytorch-image-models/timm/models/efficientvit_mit.py @@ -0,0 +1,1072 @@ +""" EfficientViT (by MIT Song Han's Lab) + +Paper: `Efficientvit: Enhanced linear attention for high-resolution low-computation visual recognition` + - https://arxiv.org/abs/2205.14756 + +Adapted from official impl at https://github.com/mit-han-lab/efficientvit +""" + +__all__ = ['EfficientVit', 'EfficientVitLarge'] +from typing import List, Optional +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SelectAdaptivePool2d, create_conv2d, GELUTanh +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_module +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + + +def val2list(x: list or tuple or any, repeat_time=1): + if isinstance(x, (list, tuple)): + return list(x) + return [x for _ in range(repeat_time)] + + +def val2tuple(x: list or tuple or any, min_len: int = 1, idx_repeat: int = -1): + # repeat elements if necessary + x = val2list(x) + if len(x) > 0: + x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))] + + return tuple(x) + + +def get_same_padding(kernel_size: int or tuple[int, ...]) -> int or tuple[int, ...]: + if isinstance(kernel_size, tuple): + return tuple([get_same_padding(ks) for ks in kernel_size]) + else: + assert kernel_size % 2 > 0, "kernel size should be odd number" + return kernel_size // 2 + + +class ConvNormAct(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size=3, + stride=1, + dilation=1, + groups=1, + bias=False, + dropout=0., + norm_layer=nn.BatchNorm2d, + act_layer=nn.ReLU, + ): + super(ConvNormAct, self).__init__() + self.dropout = nn.Dropout(dropout, inplace=False) + self.conv = create_conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + groups=groups, + bias=bias, + ) + self.norm = norm_layer(num_features=out_channels) if norm_layer else nn.Identity() + self.act = act_layer(inplace=True) if act_layer is not None else nn.Identity() + + def forward(self, x): + x = self.dropout(x) + x = self.conv(x) + x = self.norm(x) + x = self.act(x) + return x + + +class DSConv(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size=3, + stride=1, + use_bias=False, + norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), + act_layer=(nn.ReLU6, None), + ): + super(DSConv, self).__init__() + use_bias = val2tuple(use_bias, 2) + norm_layer = val2tuple(norm_layer, 2) + act_layer = val2tuple(act_layer, 2) + + self.depth_conv = ConvNormAct( + in_channels, + in_channels, + kernel_size, + stride, + groups=in_channels, + norm_layer=norm_layer[0], + act_layer=act_layer[0], + bias=use_bias[0], + ) + self.point_conv = ConvNormAct( + in_channels, + out_channels, + 1, + norm_layer=norm_layer[1], + act_layer=act_layer[1], + bias=use_bias[1], + ) + + def forward(self, x): + x = self.depth_conv(x) + x = self.point_conv(x) + return x + + +class ConvBlock(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size=3, + stride=1, + 
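+ # NOTE: when mid_channels is left as None it is derived as round(in_channels * expand_ratio)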
mid_channels=None, + expand_ratio=1, + use_bias=False, + norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), + act_layer=(nn.ReLU6, None), + ): + super(ConvBlock, self).__init__() + use_bias = val2tuple(use_bias, 2) + norm_layer = val2tuple(norm_layer, 2) + act_layer = val2tuple(act_layer, 2) + mid_channels = mid_channels or round(in_channels * expand_ratio) + + self.conv1 = ConvNormAct( + in_channels, + mid_channels, + kernel_size, + stride, + norm_layer=norm_layer[0], + act_layer=act_layer[0], + bias=use_bias[0], + ) + self.conv2 = ConvNormAct( + mid_channels, + out_channels, + kernel_size, + 1, + norm_layer=norm_layer[1], + act_layer=act_layer[1], + bias=use_bias[1], + ) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + +class MBConv(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size=3, + stride=1, + mid_channels=None, + expand_ratio=6, + use_bias=False, + norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d, nn.BatchNorm2d), + act_layer=(nn.ReLU6, nn.ReLU6, None), + ): + super(MBConv, self).__init__() + use_bias = val2tuple(use_bias, 3) + norm_layer = val2tuple(norm_layer, 3) + act_layer = val2tuple(act_layer, 3) + mid_channels = mid_channels or round(in_channels * expand_ratio) + + self.inverted_conv = ConvNormAct( + in_channels, + mid_channels, + 1, + stride=1, + norm_layer=norm_layer[0], + act_layer=act_layer[0], + bias=use_bias[0], + ) + self.depth_conv = ConvNormAct( + mid_channels, + mid_channels, + kernel_size, + stride=stride, + groups=mid_channels, + norm_layer=norm_layer[1], + act_layer=act_layer[1], + bias=use_bias[1], + ) + self.point_conv = ConvNormAct( + mid_channels, + out_channels, + 1, + norm_layer=norm_layer[2], + act_layer=act_layer[2], + bias=use_bias[2], + ) + + def forward(self, x): + x = self.inverted_conv(x) + x = self.depth_conv(x) + x = self.point_conv(x) + return x + + +class FusedMBConv(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size=3, + stride=1, + mid_channels=None, + expand_ratio=6, + groups=1, + use_bias=False, + norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), + act_layer=(nn.ReLU6, None), + ): + super(FusedMBConv, self).__init__() + use_bias = val2tuple(use_bias, 2) + norm_layer = val2tuple(norm_layer, 2) + act_layer = val2tuple(act_layer, 2) + mid_channels = mid_channels or round(in_channels * expand_ratio) + + self.spatial_conv = ConvNormAct( + in_channels, + mid_channels, + kernel_size, + stride=stride, + groups=groups, + norm_layer=norm_layer[0], + act_layer=act_layer[0], + bias=use_bias[0], + ) + self.point_conv = ConvNormAct( + mid_channels, + out_channels, + 1, + norm_layer=norm_layer[1], + act_layer=act_layer[1], + bias=use_bias[1], + ) + + def forward(self, x): + x = self.spatial_conv(x) + x = self.point_conv(x) + return x + + +class LiteMLA(nn.Module): + """Lightweight multi-scale linear attention""" + + def __init__( + self, + in_channels: int, + out_channels: int, + heads: int or None = None, + heads_ratio: float = 1.0, + dim=8, + use_bias=False, + norm_layer=(None, nn.BatchNorm2d), + act_layer=(None, None), + kernel_func=nn.ReLU, + scales=(5,), + eps=1e-5, + ): + super(LiteMLA, self).__init__() + self.eps = eps + heads = heads or int(in_channels // dim * heads_ratio) + total_dim = heads * dim + use_bias = val2tuple(use_bias, 2) + norm_layer = val2tuple(norm_layer, 2) + act_layer = val2tuple(act_layer, 2) + + self.dim = dim + self.qkv = ConvNormAct( + in_channels, + 3 * total_dim, + 1, + bias=use_bias[0], + norm_layer=norm_layer[0], + 
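+ # q, k, v come from this single 1x1 conv (3 * total_dim channels, split later in forward()).
+ # With the ReLU kernel_func below, attention reduces to q @ (k^T v), linear in token count;
+ # v is padded with a ones column so the same product also yields the per-query normalizer (see _attn()).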
act_layer=act_layer[0], + ) + self.aggreg = nn.ModuleList([ + nn.Sequential( + nn.Conv2d( + 3 * total_dim, + 3 * total_dim, + scale, + padding=get_same_padding(scale), + groups=3 * total_dim, + bias=use_bias[0], + ), + nn.Conv2d(3 * total_dim, 3 * total_dim, 1, groups=3 * heads, bias=use_bias[0]), + ) + for scale in scales + ]) + self.kernel_func = kernel_func(inplace=False) + + self.proj = ConvNormAct( + total_dim * (1 + len(scales)), + out_channels, + 1, + bias=use_bias[1], + norm_layer=norm_layer[1], + act_layer=act_layer[1], + ) + + def _attn(self, q, k, v): + dtype = v.dtype + q, k, v = q.float(), k.float(), v.float() + kv = k.transpose(-1, -2) @ v + out = q @ kv + out = out[..., :-1] / (out[..., -1:] + self.eps) + return out.to(dtype) + + def forward(self, x): + B, _, H, W = x.shape + + # generate multi-scale q, k, v + qkv = self.qkv(x) + multi_scale_qkv = [qkv] + for op in self.aggreg: + multi_scale_qkv.append(op(qkv)) + multi_scale_qkv = torch.cat(multi_scale_qkv, dim=1) + multi_scale_qkv = multi_scale_qkv.reshape(B, -1, 3 * self.dim, H * W).transpose(-1, -2) + q, k, v = multi_scale_qkv.chunk(3, dim=-1) + + # lightweight global attention + q = self.kernel_func(q) + k = self.kernel_func(k) + v = F.pad(v, (0, 1), mode="constant", value=1.) + + if not torch.jit.is_scripting(): + with torch.autocast(device_type=v.device.type, enabled=False): + out = self._attn(q, k, v) + else: + out = self._attn(q, k, v) + + # final projection + out = out.transpose(-1, -2).reshape(B, -1, H, W) + out = self.proj(out) + return out + + +register_notrace_module(LiteMLA) + + +class EfficientVitBlock(nn.Module): + def __init__( + self, + in_channels, + heads_ratio=1.0, + head_dim=32, + expand_ratio=4, + norm_layer=nn.BatchNorm2d, + act_layer=nn.Hardswish, + ): + super(EfficientVitBlock, self).__init__() + self.context_module = ResidualBlock( + LiteMLA( + in_channels=in_channels, + out_channels=in_channels, + heads_ratio=heads_ratio, + dim=head_dim, + norm_layer=(None, norm_layer), + ), + nn.Identity(), + ) + self.local_module = ResidualBlock( + MBConv( + in_channels=in_channels, + out_channels=in_channels, + expand_ratio=expand_ratio, + use_bias=(True, True, False), + norm_layer=(None, None, norm_layer), + act_layer=(act_layer, act_layer, None), + ), + nn.Identity(), + ) + + def forward(self, x): + x = self.context_module(x) + x = self.local_module(x) + return x + + +class ResidualBlock(nn.Module): + def __init__( + self, + main: Optional[nn.Module], + shortcut: Optional[nn.Module] = None, + pre_norm: Optional[nn.Module] = None, + ): + super(ResidualBlock, self).__init__() + self.pre_norm = pre_norm if pre_norm is not None else nn.Identity() + self.main = main + self.shortcut = shortcut + + def forward(self, x): + res = self.main(self.pre_norm(x)) + if self.shortcut is not None: + res = res + self.shortcut(x) + return res + + +def build_local_block( + in_channels: int, + out_channels: int, + stride: int, + expand_ratio: float, + norm_layer: str, + act_layer: str, + fewer_norm: bool = False, + block_type: str = "default", +): + assert block_type in ["default", "large", "fused"] + if expand_ratio == 1: + if block_type == "default": + block = DSConv( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + use_bias=(True, False) if fewer_norm else False, + norm_layer=(None, norm_layer) if fewer_norm else norm_layer, + act_layer=(act_layer, None), + ) + else: + block = ConvBlock( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + use_bias=(True, False) if fewer_norm else 
False, + norm_layer=(None, norm_layer) if fewer_norm else norm_layer, + act_layer=(act_layer, None), + ) + else: + if block_type == "default": + block = MBConv( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + expand_ratio=expand_ratio, + use_bias=(True, True, False) if fewer_norm else False, + norm_layer=(None, None, norm_layer) if fewer_norm else norm_layer, + act_layer=(act_layer, act_layer, None), + ) + else: + block = FusedMBConv( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + expand_ratio=expand_ratio, + use_bias=(True, False) if fewer_norm else False, + norm_layer=(None, norm_layer) if fewer_norm else norm_layer, + act_layer=(act_layer, None), + ) + return block + + +class Stem(nn.Sequential): + def __init__(self, in_chs, out_chs, depth, norm_layer, act_layer, block_type='default'): + super().__init__() + self.stride = 2 + + self.add_module( + 'in_conv', + ConvNormAct( + in_chs, out_chs, + kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer, + ) + ) + stem_block = 0 + for _ in range(depth): + self.add_module(f'res{stem_block}', ResidualBlock( + build_local_block( + in_channels=out_chs, + out_channels=out_chs, + stride=1, + expand_ratio=1, + norm_layer=norm_layer, + act_layer=act_layer, + block_type=block_type, + ), + nn.Identity(), + )) + stem_block += 1 + + +class EfficientVitStage(nn.Module): + def __init__( + self, + in_chs, + out_chs, + depth, + norm_layer, + act_layer, + expand_ratio, + head_dim, + vit_stage=False, + ): + super(EfficientVitStage, self).__init__() + blocks = [ResidualBlock( + build_local_block( + in_channels=in_chs, + out_channels=out_chs, + stride=2, + expand_ratio=expand_ratio, + norm_layer=norm_layer, + act_layer=act_layer, + fewer_norm=vit_stage, + ), + None, + )] + in_chs = out_chs + + if vit_stage: + # for stage 3, 4 + for _ in range(depth): + blocks.append( + EfficientVitBlock( + in_channels=in_chs, + head_dim=head_dim, + expand_ratio=expand_ratio, + norm_layer=norm_layer, + act_layer=act_layer, + ) + ) + else: + # for stage 1, 2 + for i in range(1, depth): + blocks.append(ResidualBlock( + build_local_block( + in_channels=in_chs, + out_channels=out_chs, + stride=1, + expand_ratio=expand_ratio, + norm_layer=norm_layer, + act_layer=act_layer + ), + nn.Identity(), + )) + + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + return self.blocks(x) + + +class EfficientVitLargeStage(nn.Module): + def __init__( + self, + in_chs, + out_chs, + depth, + norm_layer, + act_layer, + head_dim, + vit_stage=False, + fewer_norm=False, + ): + super(EfficientVitLargeStage, self).__init__() + blocks = [ResidualBlock( + build_local_block( + in_channels=in_chs, + out_channels=out_chs, + stride=2, + expand_ratio=24 if vit_stage else 16, + norm_layer=norm_layer, + act_layer=act_layer, + fewer_norm=vit_stage or fewer_norm, + block_type='default' if fewer_norm else 'fused', + ), + None, + )] + in_chs = out_chs + + if vit_stage: + # for stage 4 + for _ in range(depth): + blocks.append( + EfficientVitBlock( + in_channels=in_chs, + head_dim=head_dim, + expand_ratio=6, + norm_layer=norm_layer, + act_layer=act_layer, + ) + ) + else: + # for stage 1, 2, 3 + for i in range(depth): + blocks.append(ResidualBlock( + build_local_block( + in_channels=in_chs, + out_channels=out_chs, + stride=1, + expand_ratio=4, + norm_layer=norm_layer, + act_layer=act_layer, + fewer_norm=fewer_norm, + block_type='default' if fewer_norm else 'fused', + ), + nn.Identity(), + )) + + self.blocks = nn.Sequential(*blocks) + + def 
forward(self, x): + return self.blocks(x) + + +class ClassifierHead(nn.Module): + def __init__( + self, + in_channels: int, + widths: List[int], + num_classes: int = 1000, + dropout: float = 0., + norm_layer=nn.BatchNorm2d, + act_layer=nn.Hardswish, + pool_type: str = 'avg', + norm_eps: float = 1e-5, + ): + super(ClassifierHead, self).__init__() + self.widths = widths + self.num_features = widths[-1] + + assert pool_type, 'Cannot disable pooling' + self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer) + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True) + self.classifier = nn.Sequential( + nn.Linear(widths[0], widths[1], bias=False), + nn.LayerNorm(widths[1], eps=norm_eps), + act_layer(inplace=True) if act_layer is not None else nn.Identity(), + nn.Dropout(dropout, inplace=False), + nn.Linear(widths[1], num_classes, bias=True) if num_classes > 0 else nn.Identity(), + ) + + def reset(self, num_classes: int, pool_type: Optional[str] = None): + if pool_type is not None: + assert pool_type, 'Cannot disable pooling' + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True,) + if num_classes > 0: + self.classifier[-1] = nn.Linear(self.num_features, num_classes, bias=True) + else: + self.classifier[-1] = nn.Identity() + + def forward(self, x, pre_logits: bool = False): + x = self.in_conv(x) + x = self.global_pool(x) + if pre_logits: + # cannot slice or iterate with torchscript so, this + x = self.classifier[0](x) + x = self.classifier[1](x) + x = self.classifier[2](x) + x = self.classifier[3](x) + else: + x = self.classifier(x) + return x + + +class EfficientVit(nn.Module): + def __init__( + self, + in_chans=3, + widths=(), + depths=(), + head_dim=32, + expand_ratio=4, + norm_layer=nn.BatchNorm2d, + act_layer=nn.Hardswish, + global_pool='avg', + head_widths=(), + drop_rate=0.0, + num_classes=1000, + ): + super(EfficientVit, self).__init__() + self.grad_checkpointing = False + self.global_pool = global_pool + self.num_classes = num_classes + + # input stem + self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer) + stride = self.stem.stride + + # stages + self.feature_info = [] + self.stages = nn.Sequential() + in_channels = widths[0] + for i, (w, d) in enumerate(zip(widths[1:], depths[1:])): + self.stages.append(EfficientVitStage( + in_channels, + w, + depth=d, + norm_layer=norm_layer, + act_layer=act_layer, + expand_ratio=expand_ratio, + head_dim=head_dim, + vit_stage=i >= 2, + )) + stride *= 2 + in_channels = w + self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')] + + self.num_features = in_channels + self.head = ClassifierHead( + self.num_features, + widths=head_widths, + num_classes=num_classes, + dropout=drop_rate, + pool_type=self.global_pool, + ) + self.head_hidden_size = self.head.num_features + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+).downsample', (0,)), + (r'^stages\.(\d+)\.\w+\.(\d+)', None), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.classifier[-1] + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + 
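+ # gradient checkpointing re-computes stage activations during backward to save memory;
+ # it is bypassed under torch.jit scripting, which cannot handle checkpoint_seq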
if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +class EfficientVitLarge(nn.Module): + def __init__( + self, + in_chans=3, + widths=(), + depths=(), + head_dim=32, + norm_layer=nn.BatchNorm2d, + act_layer=GELUTanh, + global_pool='avg', + head_widths=(), + drop_rate=0.0, + num_classes=1000, + norm_eps=1e-7, + ): + super(EfficientVitLarge, self).__init__() + self.grad_checkpointing = False + self.global_pool = global_pool + self.num_classes = num_classes + self.norm_eps = norm_eps + norm_layer = partial(norm_layer, eps=self.norm_eps) + + # input stem + self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer, block_type='large') + stride = self.stem.stride + + # stages + self.feature_info = [] + self.stages = nn.Sequential() + in_channels = widths[0] + for i, (w, d) in enumerate(zip(widths[1:], depths[1:])): + self.stages.append(EfficientVitLargeStage( + in_channels, + w, + depth=d, + norm_layer=norm_layer, + act_layer=act_layer, + head_dim=head_dim, + vit_stage=i >= 3, + fewer_norm=i >= 2, + )) + stride *= 2 + in_channels = w + self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')] + + self.num_features = in_channels + self.head = ClassifierHead( + self.num_features, + widths=head_widths, + num_classes=num_classes, + dropout=drop_rate, + pool_type=self.global_pool, + act_layer=act_layer, + norm_eps=self.norm_eps, + ) + self.head_hidden_size = self.head.num_features + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+).downsample', (0,)), + (r'^stages\.(\d+)\.\w+\.(\d+)', None), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.classifier[-1] + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.in_conv.conv', + 'classifier': 'head.classifier.4', + 'crop_pct': 0.95, + 'input_size': (3, 224, 224), + 'pool_size': (7, 7), + **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'efficientvit_b0.r224_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientvit_b1.r224_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientvit_b1.r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, + ), + 'efficientvit_b1.r288_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, + ), + 'efficientvit_b2.r224_in1k': 
_cfg( + hf_hub_id='timm/', + ), + 'efficientvit_b2.r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, + ), + 'efficientvit_b2.r288_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, + ), + 'efficientvit_b3.r224_in1k': _cfg( + hf_hub_id='timm/', + ), + 'efficientvit_b3.r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, + ), + 'efficientvit_b3.r288_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, + ), + 'efficientvit_l1.r224_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=1.0, + ), + 'efficientvit_l2.r224_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=1.0, + ), + 'efficientvit_l2.r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, + ), + 'efficientvit_l2.r288_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, + ), + 'efficientvit_l2.r384_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + 'efficientvit_l3.r224_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=1.0, + ), + 'efficientvit_l3.r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, + ), + 'efficientvit_l3.r320_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, + ), + 'efficientvit_l3.r384_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + # 'efficientvit_l0_sam.sam': _cfg( + # # hf_hub_id='timm/', + # input_size=(3, 512, 512), crop_pct=1.0, + # num_classes=0, + # ), + # 'efficientvit_l1_sam.sam': _cfg( + # # hf_hub_id='timm/', + # input_size=(3, 512, 512), crop_pct=1.0, + # num_classes=0, + # ), + # 'efficientvit_l2_sam.sam': _cfg( + # # hf_hub_id='timm/',f + # input_size=(3, 512, 512), crop_pct=1.0, + # num_classes=0, + # ), +}) + + +def _create_efficientvit(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) + model = build_model_with_cfg( + EfficientVit, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs + ) + return model + + +def _create_efficientvit_large(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) + model = build_model_with_cfg( + EfficientVitLarge, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs + ) + return model + + +@register_model +def efficientvit_b0(pretrained=False, **kwargs): + model_args = dict( + widths=(8, 16, 32, 64, 128), depths=(1, 2, 2, 2, 2), head_dim=16, head_widths=(1024, 1280)) + return _create_efficientvit('efficientvit_b0', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_b1(pretrained=False, **kwargs): + model_args = dict( + widths=(16, 32, 64, 128, 256), depths=(1, 2, 3, 3, 4), head_dim=16, head_widths=(1536, 1600)) + return _create_efficientvit('efficientvit_b1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_b2(pretrained=False, **kwargs): + model_args = dict( + widths=(24, 48, 96, 192, 384), depths=(1, 3, 4, 4, 6), head_dim=32, head_widths=(2304, 2560)) + return _create_efficientvit('efficientvit_b2', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_b3(pretrained=False, **kwargs): + model_args = dict( + widths=(32, 64, 
128, 256, 512), depths=(1, 4, 6, 6, 9), head_dim=32, head_widths=(2304, 2560)) + return _create_efficientvit('efficientvit_b3', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_l1(pretrained=False, **kwargs): + model_args = dict( + widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 6, 6), head_dim=32, head_widths=(3072, 3200)) + return _create_efficientvit_large('efficientvit_l1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_l2(pretrained=False, **kwargs): + model_args = dict( + widths=(32, 64, 128, 256, 512), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(3072, 3200)) + return _create_efficientvit_large('efficientvit_l2', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_l3(pretrained=False, **kwargs): + model_args = dict( + widths=(64, 128, 256, 512, 1024), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(6144, 6400)) + return _create_efficientvit_large('efficientvit_l3', pretrained=pretrained, **dict(model_args, **kwargs)) + + +# FIXME will wait for v2 SAM models which are pending +# @register_model +# def efficientvit_l0_sam(pretrained=False, **kwargs): +# # only backbone for segment-anything-model weights +# model_args = dict( +# widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 4, 4), head_dim=32, num_classes=0, norm_eps=1e-6) +# return _create_efficientvit_large('efficientvit_l0_sam', pretrained=pretrained, **dict(model_args, **kwargs)) +# +# +# @register_model +# def efficientvit_l1_sam(pretrained=False, **kwargs): +# # only backbone for segment-anything-model weights +# model_args = dict( +# widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 6, 6), head_dim=32, num_classes=0, norm_eps=1e-6) +# return _create_efficientvit_large('efficientvit_l1_sam', pretrained=pretrained, **dict(model_args, **kwargs)) +# +# +# @register_model +# def efficientvit_l2_sam(pretrained=False, **kwargs): +# # only backbone for segment-anything-model weights +# model_args = dict( +# widths=(32, 64, 128, 256, 512), depths=(1, 2, 2, 8, 8), head_dim=32, num_classes=0, norm_eps=1e-6) +# return _create_efficientvit_large('efficientvit_l2_sam', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/efficientvit_msra.py b/pytorch-image-models/timm/models/efficientvit_msra.py new file mode 100644 index 0000000000000000000000000000000000000000..dd8ef80a85857286b475221f3cf9d2a80acd9599 --- /dev/null +++ b/pytorch-image-models/timm/models/efficientvit_msra.py @@ -0,0 +1,659 @@ +""" EfficientViT (by MSRA) + +Paper: `EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention` + - https://arxiv.org/abs/2305.07027 + +Adapted from official impl at https://github.com/microsoft/Cream/tree/main/EfficientViT +""" + +__all__ = ['EfficientVitMsra'] +import itertools +from collections import OrderedDict +from typing import Dict, Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SqueezeExcite, SelectAdaptivePool2d, trunc_normal_, _assert +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + + +class ConvNorm(torch.nn.Sequential): + def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): + super().__init__() + self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False) + self.bn = 
nn.BatchNorm2d(out_chs) + torch.nn.init.constant_(self.bn.weight, bn_weight_init) + torch.nn.init.constant_(self.bn.bias, 0) + + @torch.no_grad() + def fuse(self): + c, bn = self.conv, self.bn + w = bn.weight / (bn.running_var + bn.eps)**0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / \ + (bn.running_var + bn.eps)**0.5 + m = torch.nn.Conv2d( + w.size(1) * self.conv.groups, w.size(0), w.shape[2:], + stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class NormLinear(torch.nn.Sequential): + def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.): + super().__init__() + self.bn = nn.BatchNorm1d(in_features) + self.drop = nn.Dropout(drop) + self.linear = nn.Linear(in_features, out_features, bias=bias) + + trunc_normal_(self.linear.weight, std=std) + if self.linear.bias is not None: + nn.init.constant_(self.linear.bias, 0) + + @torch.no_grad() + def fuse(self): + bn, linear = self.bn, self.linear + w = bn.weight / (bn.running_var + bn.eps)**0.5 + b = bn.bias - self.bn.running_mean * \ + self.bn.weight / (bn.running_var + bn.eps)**0.5 + w = linear.weight * w[None, :] + if linear.bias is None: + b = b @ self.linear.weight.T + else: + b = (linear.weight @ b[:, None]).view(-1) + self.linear.bias + m = torch.nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class PatchMerging(torch.nn.Module): + def __init__(self, dim, out_dim): + super().__init__() + hid_dim = int(dim * 4) + self.conv1 = ConvNorm(dim, hid_dim, 1, 1, 0) + self.act = torch.nn.ReLU() + self.conv2 = ConvNorm(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim) + self.se = SqueezeExcite(hid_dim, .25) + self.conv3 = ConvNorm(hid_dim, out_dim, 1, 1, 0) + + def forward(self, x): + x = self.conv3(self.se(self.act(self.conv2(self.act(self.conv1(x)))))) + return x + + +class ResidualDrop(torch.nn.Module): + def __init__(self, m, drop=0.): + super().__init__() + self.m = m + self.drop = drop + + def forward(self, x): + if self.training and self.drop > 0: + return x + self.m(x) * torch.rand( + x.size(0), 1, 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() + else: + return x + self.m(x) + + +class ConvMlp(torch.nn.Module): + def __init__(self, ed, h): + super().__init__() + self.pw1 = ConvNorm(ed, h) + self.act = torch.nn.ReLU() + self.pw2 = ConvNorm(h, ed, bn_weight_init=0) + + def forward(self, x): + x = self.pw2(self.act(self.pw1(x))) + return x + + +class CascadedGroupAttention(torch.nn.Module): + attention_bias_cache: Dict[str, torch.Tensor] + + r""" Cascaded Group Attention. + + Args: + dim (int): Number of input channels. + key_dim (int): The dimension for query and key. + num_heads (int): Number of attention heads. + attn_ratio (int): Multiplier for the query dim for value dimension. + resolution (int): Input resolution, correspond to the window size. + kernels (List[int]): The kernel size of the dw conv on query. 
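+ 
+     The channel splits are processed as a cascade: each head adds the previous head's output
+     to its own split before attending, and all head outputs are concatenated for the final
+     projection. A learned relative position bias (attention_biases) is added to the attention logits.
+ 
+     Example (illustrative shapes only; resolution must match the input feature map):
+         attn = CascadedGroupAttention(dim=128, key_dim=16, num_heads=4, resolution=14)
+         out = attn(torch.randn(2, 128, 14, 14))  # -> (2, 128, 14, 14)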
+ """ + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=14, + kernels=(5, 5, 5, 5), + ): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.val_dim = int(attn_ratio * key_dim) + self.attn_ratio = attn_ratio + + qkvs = [] + dws = [] + for i in range(num_heads): + qkvs.append(ConvNorm(dim // (num_heads), self.key_dim * 2 + self.val_dim)) + dws.append(ConvNorm(self.key_dim, self.key_dim, kernels[i], 1, kernels[i] // 2, groups=self.key_dim)) + self.qkvs = torch.nn.ModuleList(qkvs) + self.dws = torch.nn.ModuleList(dws) + self.proj = torch.nn.Sequential( + torch.nn.ReLU(), + ConvNorm(self.val_dim * num_heads, dim, bn_weight_init=0) + ) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) + self.attention_bias_cache = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if torch.jit.is_tracing() or self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): + B, C, H, W = x.shape + feats_in = x.chunk(len(self.qkvs), dim=1) + feats_out = [] + feat = feats_in[0] + attn_bias = self.get_attention_biases(x.device) + for head_idx, (qkv, dws) in enumerate(zip(self.qkvs, self.dws)): + if head_idx > 0: + feat = feat + feats_in[head_idx] + feat = qkv(feat) + q, k, v = feat.view(B, -1, H, W).split([self.key_dim, self.key_dim, self.val_dim], dim=1) + q = dws(q) + q, k, v = q.flatten(2), k.flatten(2), v.flatten(2) + q = q * self.scale + attn = q.transpose(-2, -1) @ k + attn = attn + attn_bias[head_idx] + attn = attn.softmax(dim=-1) + feat = v @ attn.transpose(-2, -1) + feat = feat.view(B, self.val_dim, H, W) + feats_out.append(feat) + x = self.proj(torch.cat(feats_out, 1)) + return x + + +class LocalWindowAttention(torch.nn.Module): + r""" Local Window Attention. + + Args: + dim (int): Number of input channels. + key_dim (int): The dimension for query and key. + num_heads (int): Number of attention heads. + attn_ratio (int): Multiplier for the query dim for value dimension. + resolution (int): Input resolution. + window_resolution (int): Local window resolution. + kernels (List[int]): The kernel size of the dw conv on query. 
+ """ + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=14, + window_resolution=7, + kernels=(5, 5, 5, 5), + ): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.resolution = resolution + assert window_resolution > 0, 'window_size must be greater than 0' + self.window_resolution = window_resolution + window_resolution = min(window_resolution, resolution) + self.attn = CascadedGroupAttention( + dim, key_dim, num_heads, + attn_ratio=attn_ratio, + resolution=window_resolution, + kernels=kernels, + ) + + def forward(self, x): + H = W = self.resolution + B, C, H_, W_ = x.shape + # Only check this for classifcation models + _assert(H == H_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}') + _assert(W == W_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}') + if H <= self.window_resolution and W <= self.window_resolution: + x = self.attn(x) + else: + x = x.permute(0, 2, 3, 1) + pad_b = (self.window_resolution - H % self.window_resolution) % self.window_resolution + pad_r = (self.window_resolution - W % self.window_resolution) % self.window_resolution + x = torch.nn.functional.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + + pH, pW = H + pad_b, W + pad_r + nH = pH // self.window_resolution + nW = pW // self.window_resolution + # window partition, BHWC -> B(nHh)(nWw)C -> BnHnWhwC -> (BnHnW)hwC -> (BnHnW)Chw + x = x.view(B, nH, self.window_resolution, nW, self.window_resolution, C).transpose(2, 3) + x = x.reshape(B * nH * nW, self.window_resolution, self.window_resolution, C).permute(0, 3, 1, 2) + x = self.attn(x) + # window reverse, (BnHnW)Chw -> (BnHnW)hwC -> BnHnWhwC -> B(nHh)(nWw)C -> BHWC + x = x.permute(0, 2, 3, 1).view(B, nH, nW, self.window_resolution, self.window_resolution, C) + x = x.transpose(2, 3).reshape(B, pH, pW, C) + x = x[:, :H, :W].contiguous() + x = x.permute(0, 3, 1, 2) + return x + + +class EfficientVitBlock(torch.nn.Module): + """ A basic EfficientVit building block. + + Args: + dim (int): Number of input channels. + key_dim (int): Dimension for query and key in the token mixer. + num_heads (int): Number of attention heads. + attn_ratio (int): Multiplier for the query dim for value dimension. + resolution (int): Input resolution. + window_resolution (int): Local window resolution. + kernels (List[int]): The kernel size of the dw conv on query. 
+ """ + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=14, + window_resolution=7, + kernels=[5, 5, 5, 5], + ): + super().__init__() + + self.dw0 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.)) + self.ffn0 = ResidualDrop(ConvMlp(dim, int(dim * 2))) + + self.mixer = ResidualDrop( + LocalWindowAttention( + dim, key_dim, num_heads, + attn_ratio=attn_ratio, + resolution=resolution, + window_resolution=window_resolution, + kernels=kernels, + ) + ) + + self.dw1 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.)) + self.ffn1 = ResidualDrop(ConvMlp(dim, int(dim * 2))) + + def forward(self, x): + return self.ffn1(self.dw1(self.mixer(self.ffn0(self.dw0(x))))) + + +class EfficientVitStage(torch.nn.Module): + def __init__( + self, + in_dim, + out_dim, + key_dim, + downsample=('', 1), + num_heads=8, + attn_ratio=4, + resolution=14, + window_resolution=7, + kernels=[5, 5, 5, 5], + depth=1, + ): + super().__init__() + if downsample[0] == 'subsample': + self.resolution = (resolution - 1) // downsample[1] + 1 + down_blocks = [] + down_blocks.append(( + 'res1', + torch.nn.Sequential( + ResidualDrop(ConvNorm(in_dim, in_dim, 3, 1, 1, groups=in_dim)), + ResidualDrop(ConvMlp(in_dim, int(in_dim * 2))), + ) + )) + down_blocks.append(('patchmerge', PatchMerging(in_dim, out_dim))) + down_blocks.append(( + 'res2', + torch.nn.Sequential( + ResidualDrop(ConvNorm(out_dim, out_dim, 3, 1, 1, groups=out_dim)), + ResidualDrop(ConvMlp(out_dim, int(out_dim * 2))), + ) + )) + self.downsample = nn.Sequential(OrderedDict(down_blocks)) + else: + assert in_dim == out_dim + self.downsample = nn.Identity() + self.resolution = resolution + + blocks = [] + for d in range(depth): + blocks.append(EfficientVitBlock(out_dim, key_dim, num_heads, attn_ratio, self.resolution, window_resolution, kernels)) + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + x = self.blocks(x) + return x + + +class PatchEmbedding(torch.nn.Sequential): + def __init__(self, in_chans, dim): + super().__init__() + self.add_module('conv1', ConvNorm(in_chans, dim // 8, 3, 2, 1)) + self.add_module('relu1', torch.nn.ReLU()) + self.add_module('conv2', ConvNorm(dim // 8, dim // 4, 3, 2, 1)) + self.add_module('relu2', torch.nn.ReLU()) + self.add_module('conv3', ConvNorm(dim // 4, dim // 2, 3, 2, 1)) + self.add_module('relu3', torch.nn.ReLU()) + self.add_module('conv4', ConvNorm(dim // 2, dim, 3, 2, 1)) + self.patch_size = 16 + + +class EfficientVitMsra(nn.Module): + def __init__( + self, + img_size=224, + in_chans=3, + num_classes=1000, + embed_dim=(64, 128, 192), + key_dim=(16, 16, 16), + depth=(1, 2, 3), + num_heads=(4, 4, 4), + window_size=(7, 7, 7), + kernels=(5, 5, 5, 5), + down_ops=(('', 1), ('subsample', 2), ('subsample', 2)), + global_pool='avg', + drop_rate=0., + ): + super(EfficientVitMsra, self).__init__() + self.grad_checkpointing = False + self.num_classes = num_classes + self.drop_rate = drop_rate + + # Patch embedding + self.patch_embed = PatchEmbedding(in_chans, embed_dim[0]) + stride = self.patch_embed.patch_size + resolution = img_size // self.patch_embed.patch_size + attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))] + + # Build EfficientVit blocks + self.feature_info = [] + stages = [] + pre_ed = embed_dim[0] + for i, (ed, kd, dpth, nh, ar, wd, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)): + stage = EfficientVitStage( + in_dim=pre_ed, + 
out_dim=ed, + key_dim=kd, + downsample=do, + num_heads=nh, + attn_ratio=ar, + resolution=resolution, + window_resolution=wd, + kernels=kernels, + depth=dpth, + ) + pre_ed = ed + if do[0] == 'subsample' and i != 0: + stride *= do[1] + resolution = stage.resolution + stages.append(stage) + self.feature_info += [dict(num_chs=ed, reduction=stride, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + + if global_pool == 'avg': + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) + else: + assert num_classes == 0 + self.global_pool = nn.Identity() + self.num_features = self.head_hidden_size = embed_dim[-1] + self.head = NormLinear( + self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity() + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embed', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+).downsample', (0,)), + (r'^stages\.(\d+)\.\w+\.(\d+)', None), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.linear + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + if global_pool == 'avg': + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) + else: + assert num_classes == 0 + self.global_pool = nn.Identity() + self.head = NormLinear( + self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +# def checkpoint_filter_fn(state_dict, model): +# if 'model' in state_dict.keys(): +# state_dict = state_dict['model'] +# tmp_dict = {} +# out_dict = {} +# target_keys = model.state_dict().keys() +# target_keys = [k for k in target_keys if k.startswith('stages.')] +# +# for k, v in state_dict.items(): +# if 'attention_bias_idxs' in k: +# continue +# k = k.split('.') +# if k[-2] == 'c': +# k[-2] = 'conv' +# if k[-2] == 'l': +# k[-2] = 'linear' +# k = '.'.join(k) +# tmp_dict[k] = v +# +# for k, v in tmp_dict.items(): +# if k.startswith('patch_embed'): +# k = k.split('.') +# k[1] = 'conv' + str(int(k[1]) // 2 + 1) +# k = '.'.join(k) +# elif k.startswith('blocks'): +# kw = '.'.join(k.split('.')[2:]) +# find_kw = [a for a in list(sorted(tmp_dict.keys())) if kw in a] +# idx = find_kw.index(k) +# k = [a for a in target_keys if kw in a][idx] +# out_dict[k] = v +# +# return out_dict + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv1.conv', + 'classifier': 'head.linear', + 'fixed_input_size': True, + 'pool_size': (4, 4), + **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'efficientvit_m0.r224_in1k': _cfg( + hf_hub_id='timm/', + 
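+ # a trailing-slash hf_hub_id ('timm/') is expanded by timm with the model name and pretrained tag,
+ # e.g. resolving to timm/efficientvit_m0.r224_in1k on the Hugging Face Hub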
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m0.pth' + ), + 'efficientvit_m1.r224_in1k': _cfg( + hf_hub_id='timm/', + #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m1.pth' + ), + 'efficientvit_m2.r224_in1k': _cfg( + hf_hub_id='timm/', + #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m2.pth' + ), + 'efficientvit_m3.r224_in1k': _cfg( + hf_hub_id='timm/', + #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m3.pth' + ), + 'efficientvit_m4.r224_in1k': _cfg( + hf_hub_id='timm/', + #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m4.pth' + ), + 'efficientvit_m5.r224_in1k': _cfg( + hf_hub_id='timm/', + #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m5.pth' + ), +}) + + +def _create_efficientvit_msra(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2)) + model = build_model_with_cfg( + EfficientVitMsra, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs + ) + return model + + +@register_model +def efficientvit_m0(pretrained=False, **kwargs): + model_args = dict( + img_size=224, + embed_dim=[64, 128, 192], + depth=[1, 2, 3], + num_heads=[4, 4, 4], + window_size=[7, 7, 7], + kernels=[5, 5, 5, 5] + ) + return _create_efficientvit_msra('efficientvit_m0', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_m1(pretrained=False, **kwargs): + model_args = dict( + img_size=224, + embed_dim=[128, 144, 192], + depth=[1, 2, 3], + num_heads=[2, 3, 3], + window_size=[7, 7, 7], + kernels=[7, 5, 3, 3] + ) + return _create_efficientvit_msra('efficientvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_m2(pretrained=False, **kwargs): + model_args = dict( + img_size=224, + embed_dim=[128, 192, 224], + depth=[1, 2, 3], + num_heads=[4, 3, 2], + window_size=[7, 7, 7], + kernels=[7, 5, 3, 3] + ) + return _create_efficientvit_msra('efficientvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_m3(pretrained=False, **kwargs): + model_args = dict( + img_size=224, + embed_dim=[128, 240, 320], + depth=[1, 2, 3], + num_heads=[4, 3, 4], + window_size=[7, 7, 7], + kernels=[5, 5, 5, 5] + ) + return _create_efficientvit_msra('efficientvit_m3', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_m4(pretrained=False, **kwargs): + model_args = dict( + img_size=224, + embed_dim=[128, 256, 384], + depth=[1, 2, 3], + num_heads=[4, 4, 4], + window_size=[7, 7, 7], + kernels=[7, 5, 3, 3] + ) + return _create_efficientvit_msra('efficientvit_m4', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def efficientvit_m5(pretrained=False, **kwargs): + model_args = dict( + img_size=224, + embed_dim=[192, 288, 384], + depth=[1, 3, 4], + num_heads=[3, 3, 4], + window_size=[7, 7, 7], + kernels=[7, 5, 3, 3] + ) + return _create_efficientvit_msra('efficientvit_m5', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/eva.py b/pytorch-image-models/timm/models/eva.py new file mode 100644 index 0000000000000000000000000000000000000000..fe871540508530f9bd16b05e440d9d609db2998f --- /dev/null +++ 
b/pytorch-image-models/timm/models/eva.py @@ -0,0 +1,1325 @@ +""" EVA + +EVA from https://github.com/baaivision/EVA , paper: https://arxiv.org/abs/2211.07636 + +@article{EVA, + title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale}, + author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang, + Tiejun and Wang, Xinlong and Cao, Yue}, + journal={arXiv preprint arXiv:2211.07636}, + year={2022} +} + +EVA-02: A Visual Representation for Neon Genesis - https://arxiv.org/abs/2303.11331 +@article{EVA02, + title={EVA-02: A Visual Representation for Neon Genesis}, + author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, + journal={arXiv preprint arXiv:2303.11331}, + year={2023} +} + +This file contains EVA & EVA02 model implementations evolved from BEiT, additional models in vision_transformer.py. + +Modifications by / Copyright 2023 Ross Wightman, original copyrights below +""" +# EVA models Copyright (c) 2022 BAAI-Vision +# EVA02 models Copyright (c) 2023 BAAI-Vision +import math +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD +from timm.layers import PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropout, RotaryEmbeddingCat, \ + apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, \ + to_2tuple, use_fused_attn + +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._registry import generate_default_cfgs, register_model + +__all__ = ['Eva'] + + +class EvaAttention(nn.Module): + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + qkv_fused: bool = True, + num_prefix_tokens: int = 1, + qkv_bias_separate: bool = False, + attn_drop: float = 0., + proj_drop: float = 0., + attn_head_dim: Optional[int] = None, + norm_layer: Optional[Callable] = None, + ): + """ + + Args: + dim: + num_heads: + qkv_bias: + qkv_fused: + attn_drop: + proj_drop: + attn_head_dim: + norm_layer: + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = head_dim ** -0.5 + self.num_prefix_tokens = num_prefix_tokens + self.fused_attn = use_fused_attn() + self.qkv_bias_separate = qkv_bias_separate + + if qkv_fused: + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + self.q_proj = self.k_proj = self.v_proj = None + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = self.k_bias = self.v_bias = None + else: + self.q_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) + self.k_proj = nn.Linear(dim, all_head_dim, bias=False) + self.v_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) + self.qkv = None + self.q_bias = self.k_bias = self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.norm = norm_layer(all_head_dim) if norm_layer is not None else nn.Identity() + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward( + self, + x, + 
rope: Optional[torch.Tensor] = None, + attn_mask: Optional[torch.Tensor] = None, + ): + B, N, C = x.shape + + if self.qkv is not None: + if self.q_bias is None: + qkv = self.qkv(x) + else: + qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) + if self.qkv_bias_separate: + qkv = self.qkv(x) + qkv += qkv_bias + else: + qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim + else: + q = self.q_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) # B, num_heads, N, C + k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) + v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) + + if rope is not None: + npt = self.num_prefix_tokens + q = torch.cat([q[:, :, :npt, :], apply_rot_embed_cat(q[:, :, npt:, :], rope)], dim=2).type_as(v) + k = torch.cat([k[:, :, :npt, :], apply_rot_embed_cat(k[:, :, npt:, :], rope)], dim=2).type_as(v) + + if self.fused_attn: + x = F.scaled_dot_product_attention( + q, k, v, + attn_mask=attn_mask, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if attn_mask is not None: + attn_mask = attn_mask.to(torch.bool) + attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf")) + attn = attn.softmax(dim=-1) + + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.norm(x) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class EvaBlock(nn.Module): + + def __init__( + self, + dim: int, + num_heads: int, + qkv_bias: bool = True, + qkv_fused: bool = True, + mlp_ratio: float = 4., + swiglu_mlp: bool = False, + scale_mlp: bool = False, + scale_attn_inner: bool = False, + num_prefix_tokens: int = 1, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + init_values: Optional[float] = None, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + attn_head_dim: Optional[int] = None, + ): + """ + + Args: + dim: + num_heads: + qkv_bias: + qkv_fused: + mlp_ratio: + swiglu_mlp: + scale_mlp: + scale_attn_inner: + proj_drop: + attn_drop: + drop_path: + init_values: + act_layer: + norm_layer: + attn_head_dim: + """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = EvaAttention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qkv_fused=qkv_fused, + num_prefix_tokens=num_prefix_tokens, + attn_drop=attn_drop, + proj_drop=proj_drop, + attn_head_dim=attn_head_dim, + norm_layer=norm_layer if scale_attn_inner else None, + ) + self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None + self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm2 = norm_layer(dim) + hidden_features = int(dim * mlp_ratio) + if swiglu_mlp: + if scale_mlp: + # when norm in SwiGLU used, an impl with separate fc for gate & x is used + self.mlp = SwiGLU( + in_features=dim, + hidden_features=hidden_features, + norm_layer=norm_layer if scale_mlp else None, + drop=proj_drop, + ) + else: + # w/o any extra norm, an impl with packed weights is used, matches existing GluMLP + self.mlp = GluMlp( + in_features=dim, + hidden_features=hidden_features * 2, + norm_layer=norm_layer if scale_mlp else None, + act_layer=nn.SiLU, + gate_last=False, + drop=proj_drop, + ) + else: + self.mlp = Mlp( + in_features=dim, + hidden_features=hidden_features, + act_layer=act_layer, + norm_layer=norm_layer if scale_mlp else None, + drop=proj_drop, + ) + self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): + if self.gamma_1 is None: + x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) + x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class EvaBlockPostNorm(nn.Module): + """ EVA block w/ post-norm and support for swiglu, MLP norm scale, ROPE. """ + def __init__( + self, + dim: int, + num_heads: int, + qkv_bias: bool = True, + qkv_fused: bool = True, + mlp_ratio: float = 4., + swiglu_mlp: bool = False, + scale_mlp: bool = False, + scale_attn_inner: bool = False, + num_prefix_tokens: int = 1, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + init_values: Optional[float] = None, # ignore for post-norm + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + attn_head_dim: Optional[int] = None, + ): + """ + + Args: + dim: + num_heads: + qkv_bias: + qkv_fused: + mlp_ratio: + swiglu_mlp: + scale_mlp: + scale_attn_inner: + proj_drop: + attn_drop: + drop_path: + init_values: + act_layer: + norm_layer: + attn_head_dim: + """ + super().__init__() + self.attn = EvaAttention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qkv_fused=qkv_fused, + num_prefix_tokens=num_prefix_tokens, + attn_drop=attn_drop, + proj_drop=proj_drop, + attn_head_dim=attn_head_dim, + norm_layer=norm_layer if scale_attn_inner else None, + ) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + hidden_features = int(dim * mlp_ratio) + if swiglu_mlp: + if scale_mlp: + # when norm in SwiGLU used, an impl with separate fc for gate & x is used + self.mlp = SwiGLU( + in_features=dim, + hidden_features=hidden_features, + norm_layer=norm_layer if scale_mlp else None, + drop=proj_drop, + ) + else: + # w/o any extra norm, an impl with packed fc1 weights is used, matches existing GluMLP + self.mlp = GluMlp( + in_features=dim, + hidden_features=hidden_features * 2, + norm_layer=norm_layer if scale_mlp else None, + act_layer=nn.SiLU, + gate_last=False, + drop=proj_drop, + ) + else: + self.mlp = Mlp( + in_features=dim, + hidden_features=hidden_features, + act_layer=act_layer, + norm_layer=norm_layer if scale_mlp else None, + drop=proj_drop, + ) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): + x = x + self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask))) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + return x + + +class Eva(nn.Module): + """ Eva Vision Transformer w/ Abs & Rotary Pos Embed + + This class implements the EVA and EVA02 models that were based on the BEiT ViT variant + * EVA - abs pos embed, global avg pool + * EVA02 - abs + rope pos embed, global avg pool, SwiGLU, scale Norm in MLP (ala normformer) + """ + + def __init__( + self, + img_size: Union[int, Tuple[int, int]] = 224, + patch_size: Union[int, Tuple[int, int]] = 16, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + qkv_bias: bool = True, + qkv_fused: bool = True, + mlp_ratio: float = 4., + swiglu_mlp: bool = False, + scale_mlp: bool = False, + scale_attn_inner: bool = False, + drop_rate: float = 0., + pos_drop_rate: float = 0., + patch_drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + norm_layer: Callable = LayerNorm, + init_values: Optional[float] = None, + class_token: bool = True, + num_reg_tokens: int = 0, + use_abs_pos_emb: bool = True, + use_rot_pos_emb: bool = False, + use_post_norm: bool = False, + dynamic_img_size: bool = False, + dynamic_img_pad: bool = False, + ref_feat_shape: Optional[Union[Tuple[int, int], int]] = None, + head_init_scale: float = 0.001, + ): + """ + + Args: + img_size: + patch_size: + in_chans: + num_classes: + global_pool: + embed_dim: + depth: + num_heads: + qkv_bias: + qkv_fused: + mlp_ratio: + swiglu_mlp: + scale_mlp: + scale_attn_inner: + drop_rate: + pos_drop_rate: + proj_drop_rate: + attn_drop_rate: + drop_path_rate: + norm_layer: + init_values: + class_token: + use_abs_pos_emb: + use_rot_pos_emb: + use_post_norm: + ref_feat_shape: + head_init_scale: + """ + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.num_prefix_tokens = (1 if class_token else 0) + num_reg_tokens + self.dynamic_img_size = dynamic_img_size + self.grad_checkpointing = False + + embed_args = {} + if dynamic_img_size: + # flatten deferred until after pos embed + embed_args.update(dict(strict_img_size=False, output_fmt='NHWC')) + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + dynamic_img_pad=dynamic_img_pad, + **embed_args, + ) + num_patches = self.patch_embed.num_patches + r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None + self.reg_token = nn.Parameter(torch.zeros(1, num_reg_tokens, embed_dim)) if num_reg_tokens else None + self.cls_embed = class_token and self.reg_token is None + + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + self.num_prefix_tokens, embed_dim)) if use_abs_pos_emb else None + self.pos_drop = nn.Dropout(p=pos_drop_rate) + if patch_drop_rate > 0: + self.patch_drop = PatchDropout( + patch_drop_rate, + num_prefix_tokens=self.num_prefix_tokens, + return_indices=True, + ) + else: + self.patch_drop = None + + if use_rot_pos_emb: + ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None + 
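+ # NOTE: rotary embeddings are built per attention head (dim = embed_dim // num_heads). With
+ # dynamic_img_size the feat_shape is left unset here and the embedding is generated from the
+ # runtime grid in _pos_embed via rope.get_embed(shape=(H, W)).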
self.rope = RotaryEmbeddingCat( + embed_dim // num_heads, + in_pixels=False, + feat_shape=None if dynamic_img_size else self.patch_embed.grid_size, + ref_feat_shape=ref_feat_shape, + ) + else: + self.rope = None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock + self.blocks = nn.ModuleList([ + block_fn( + dim=embed_dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qkv_fused=qkv_fused, + mlp_ratio=mlp_ratio, + swiglu_mlp=swiglu_mlp, + scale_mlp=scale_mlp, + scale_attn_inner=scale_attn_inner, + num_prefix_tokens=self.num_prefix_tokens, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + ) + for i in range(depth)]) + self.feature_info = [ + dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] + + use_fc_norm = self.global_pool == 'avg' + self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + if self.cls_token is not None: + trunc_normal_(self.cls_token, std=.02) + if self.reg_token is not None: + trunc_normal_(self.reg_token, std=.02) + + self.fix_init_weight() + if isinstance(self.head, nn.Linear): + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(head_init_scale) + self.head.bias.data.mul_(head_init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.zeros_(m.bias) + + @torch.jit.ignore + def no_weight_decay(self): + nwd = {'pos_embed', 'cls_token'} + return nwd + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))], + ) + return matcher + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def _pos_embed(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + if self.dynamic_img_size: + B, H, W, C = x.shape + if self.pos_embed is not None: + prev_grid_size = self.patch_embed.grid_size + pos_embed = resample_abs_pos_embed( + self.pos_embed, + new_size=(H, W), + old_size=prev_grid_size, + num_prefix_tokens=self.num_prefix_tokens, + ) + else: + pos_embed = None + x = x.view(B, -1, C) + rot_pos_embed = self.rope.get_embed(shape=(H, W)) if self.rope is not None else None + else: + pos_embed = self.pos_embed + rot_pos_embed = self.rope.get_embed() if self.rope is not None else None + + if self.cls_token is not None: + x = 
torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + + if pos_embed is not None: + x = x + pos_embed + + if self.reg_token is not None: + to_cat = [] + if self.cls_token is not None: + to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) + to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) + x = torch.cat(to_cat + [x], dim=1) + + x = self.pos_drop(x) + + # obtain shared rotary position embedding and apply patch dropout + if self.patch_drop is not None: + x, keep_indices = self.patch_drop(x) + if rot_pos_embed is not None and keep_indices is not None: + rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, keep_indices) + return x, rot_pos_embed + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + return_prefix_tokens: bool = False, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + Args: + x: Input image tensor + indices: Take last n blocks if an int, if is a sequence, select by matching indices + return_prefix_tokens: Return both prefix and spatial intermediate tokens + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output format for EVA-ViT features must be one of NCHW or NLC.' + reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass + B, _, height, width = x.shape + x = self.patch_embed(x) + x, rot_pos_embed = self._pos_embed(x) + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x, rope=rot_pos_embed) + if i in take_indices: + intermediates.append(self.norm(x) if norm else x) + + # process intermediates + if self.num_prefix_tokens: + # split prefix (e.g. class, distill) and spatial feature tokens + prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] + intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] + if reshape: + # reshape to BCHW output format + H, W = self.patch_embed.dynamic_feat_size((height, width)) + intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] + if not torch.jit.is_scripting() and return_prefix_tokens: + # return_prefix not support in torchscript due to poor type handling + intermediates = list(zip(intermediates, prefix_tokens)) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
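+
+ Args:
+ indices: Take last n blocks if an int, if a sequence, select by matching indices.
+ prune_norm: Also replace the final norm with nn.Identity.
+ prune_head: Also remove fc_norm and reset the classifier head.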
+ """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.fc_norm = nn.Identity() + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x) + x, rot_pos_embed = self._pos_embed(x) + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, rope=rot_pos_embed) + else: + x = blk(x, rope=rot_pos_embed) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.fc_norm(x) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn( + state_dict, + model, + interpolation='bicubic', + antialias=True, +): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + state_dict = state_dict.get('model_ema', state_dict) + state_dict = state_dict.get('model', state_dict) + state_dict = state_dict.get('module', state_dict) + state_dict = state_dict.get('state_dict', state_dict) + # prefix for loading OpenCLIP compatible weights + if 'visual.trunk.pos_embed' in state_dict: + prefix = 'visual.trunk.' + elif 'visual.pos_embed' in state_dict: + prefix = 'visual.' + else: + prefix = '' + mim_weights = prefix + 'mask_token' in state_dict + no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict + + len_prefix = len(prefix) + for k, v in state_dict.items(): + if prefix: + if k.startswith(prefix): + k = k[len_prefix:] + else: + continue + + if 'rope' in k: + # fixed embedding no need to load buffer from checkpoint + continue + + if 'patch_embed.proj.weight' in k: + _, _, H, W = model.patch_embed.proj.weight.shape + if v.shape[-1] != W or v.shape[-2] != H: + v = resample_patch_embed( + v, + (H, W), + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: + # To resize pos embedding when using model at different size from pretrained weights + num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) + v = resample_abs_pos_embed( + v, + new_size=model.patch_embed.grid_size, + num_prefix_tokens=num_prefix_tokens, + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + + k = k.replace('mlp.ffn_ln', 'mlp.norm') + k = k.replace('attn.inner_attn_ln', 'attn.norm') + k = k.replace('mlp.w12', 'mlp.fc1') + k = k.replace('mlp.w1', 'mlp.fc1_g') + k = k.replace('mlp.w2', 'mlp.fc1_x') + k = k.replace('mlp.w3', 'mlp.fc2') + if no_qkv: + k = k.replace('q_bias', 'q_proj.bias') + k = k.replace('v_bias', 'v_proj.bias') + + if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'): + if k == 'norm.weight' or k == 'norm.bias': + # try moving norm -> fc norm on fine-tune, probably a better starting point than new init + k = k.replace('norm', 'fc_norm') + else: + # skip pretrain mask token & head weights + continue + + out_dict[k] = v + + return out_dict + + +def _create_eva(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + model = build_model_with_cfg( + Eva, variant, pretrained, + 
pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + 'license': 'mit', **kwargs + } + + +default_cfgs = generate_default_cfgs({ + + # EVA 01 CLIP fine-tuned on imagenet-1k + 'eva_giant_patch14_224.clip_ft_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz224_ftcls_89p1.pt', + hf_hub_id='timm/', + ), + 'eva_giant_patch14_336.clip_ft_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz336_ftcls_89p4.pt', + hf_hub_id='timm/', + input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), + + # MIM EVA 01 pretrain, ft on in22k -> in1k + 'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_336px_psz14_ema_89p6.pt', + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, + input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), + 'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_560px_psz14_ema_89p7.pt', + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, + input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'), + + # in22k or m38m MIM pretrain w/ intermediate in22k fine-tune and final in1k fine-tune + 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( + # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_B_pt_in21k_medft_in21k_ft_in1k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', + ), + 'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( + # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_in21k_medft_in21k_ft_in1k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', + ), + 'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg( + hf_hub_id='timm/', + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_m38m_medft_in21k_ft_in1k_p14.pt', + input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', + ), + + # in22k or m3m MIM pretrain w/ in1k fine-tune + 'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 336, 336), crop_pct=1.0, + ), + 'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 336, 336), crop_pct=1.0, + ), + 'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, + ), + 'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_in21k_ft_in1k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, + ), + 'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_m38m_ft_in1k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, 
+ ), + + # in22k or m3m MIM pretrain w/ in22k fine-tune + 'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, + ), + 'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, + ), + 'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt', + hf_hub_id='timm/', + input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, + ), + + # in22k or m38m MIM pretrain + 'eva02_tiny_patch14_224.mim_in22k': _cfg( + # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_Ti_pt_in21k_p14.pt', + hf_hub_id='timm/', + num_classes=0, + ), + 'eva02_small_patch14_224.mim_in22k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_S_pt_in21k_p14.pt', + hf_hub_id='timm/', + num_classes=0, + ), + 'eva02_base_patch14_224.mim_in22k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_B_pt_in21k_p14.pt', + hf_hub_id='timm/', + num_classes=0, + ), + 'eva02_large_patch14_224.mim_in22k': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_in21k_p14.pt', + hf_hub_id='timm/', + num_classes=0, + ), + 'eva02_large_patch14_224.mim_m38m': _cfg( + #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_m38m_p14.pt', + hf_hub_id='timm/', + num_classes=0, + ), + + # EVA01 and EVA02 CLIP image towers + 'eva_giant_patch14_clip_224.laion400m': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', + hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', # float16 weights + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=1024, + ), + 'eva_giant_patch14_clip_224.merged2b': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', + hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', # float16 weights + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=1024, + ), + 'eva02_base_patch16_clip_224.merged2b': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', + hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', # float16 weights + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=512, + ), + 'eva02_large_patch14_clip_224.merged2b': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', + hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', # float16 weights + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=768, + ), + 'eva02_large_patch14_clip_336.merged2b': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', + hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', # float16 weights + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 336, 336), crop_pct=1.0, + num_classes=768, + ), + 'eva02_enormous_patch14_clip_224.laion2b': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', + hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', # float16 weights + hf_hub_filename='open_clip_pytorch_model.bin', + 
num_classes=1024, + ), + 'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', + hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', # bfloat16 weights + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=1024, + ), + 'eva02_enormous_patch14_clip_224.pretrain': _cfg( + # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_E_psz14.pt', + num_classes=0, + ), + + 'vit_medium_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95, + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) + ), + 'vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95, + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) + ), + 'vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95, + ), + 'vit_base_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95, + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) + ), +}) + + +@register_model +def eva_giant_patch14_224(pretrained=False, **kwargs) -> Eva: + """ EVA-g model https://arxiv.org/abs/2211.07636 """ + model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) + model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva_giant_patch14_336(pretrained=False, **kwargs) -> Eva: + """ EVA-g model https://arxiv.org/abs/2211.07636 """ + model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) + model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva_giant_patch14_560(pretrained=False, **kwargs) -> Eva: + """ EVA-g model https://arxiv.org/abs/2211.07636 """ + model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) + model = _create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_tiny_patch14_224(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=224, + patch_size=14, + embed_dim=192, + depth=12, + num_heads=3, + mlp_ratio=4 * 2 / 3, + swiglu_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_small_patch14_224(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=224, + patch_size=14, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4 * 2 / 3, + swiglu_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_base_patch14_224(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=224, + patch_size=14, + embed_dim=768, + depth=12, + num_heads=12, + qkv_fused=False, + mlp_ratio=4 * 2 / 3, + swiglu_mlp=True, + scale_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_large_patch14_224(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=224, + 
patch_size=14, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4 * 2 / 3, + qkv_fused=False, + swiglu_mlp=True, + scale_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_tiny_patch14_336(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=336, + patch_size=14, + embed_dim=192, + depth=12, + num_heads=3, + mlp_ratio=4 * 2 / 3, + swiglu_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_tiny_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_small_patch14_336(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=336, + patch_size=14, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4 * 2 / 3, + swiglu_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_base_patch14_448(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=448, + patch_size=14, + embed_dim=768, + depth=12, + num_heads=12, + qkv_fused=False, + mlp_ratio=4 * 2 / 3, + swiglu_mlp=True, + scale_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_large_patch14_448(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=448, + patch_size=14, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4 * 2 / 3, + qkv_fused=False, + swiglu_mlp=True, + scale_mlp=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva_giant_patch14_clip_224(pretrained=False, **kwargs) -> Eva: + """ EVA-g CLIP model (only difference from non-CLIP is the pooling) """ + model_args = dict( + patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408, + global_pool=kwargs.pop('global_pool', 'token')) + model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_base_patch16_clip_224(pretrained=False, **kwargs) -> Eva: + """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_base """ + model_args = dict( + img_size=224, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + qkv_fused=False, + mlp_ratio=4 * 2 / 3, + swiglu_mlp=True, + scale_mlp=True, + scale_attn_inner=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + global_pool=kwargs.pop('global_pool', 'token'), + ) + model = _create_eva('eva02_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_large_patch14_clip_224(pretrained=False, **kwargs) -> Eva: + """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ + model_args = dict( + img_size=224, + patch_size=14, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4 * 2 / 3, + qkv_fused=False, + swiglu_mlp=True, + scale_mlp=True, + scale_attn_inner=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + 
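+ # CLIP image towers default to class-token pooling; callers can override via the global_pool kwarg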
global_pool=kwargs.pop('global_pool', 'token'), + ) + model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_large_patch14_clip_336(pretrained=False, **kwargs) -> Eva: + """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ + model_args = dict( + img_size=336, + patch_size=14, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4 * 2 / 3, + qkv_fused=False, + swiglu_mlp=True, + scale_mlp=True, + scale_attn_inner=True, + use_rot_pos_emb=True, + ref_feat_shape=(16, 16), # 224/14 + global_pool=kwargs.pop('global_pool', 'token'), + ) + model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva02_enormous_patch14_clip_224(pretrained=False, **kwargs) -> Eva: + """ A EVA-CLIP specific variant that uses residual post-norm in blocks """ + model_args = dict( + img_size=224, + patch_size=14, + embed_dim=1792, + depth=64, + num_heads=16, + mlp_ratio=15360 / 1792, + use_post_norm=True, + global_pool=kwargs.pop('global_pool', 'token'), + ) + model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=256, + patch_size=16, + embed_dim=512, + depth=12, + num_heads=8, + qkv_fused=True, + qkv_bias=True, + init_values=1e-5, + class_token=False, + num_reg_tokens=1, + use_rot_pos_emb=True, + use_abs_pos_emb=False, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('vit_medium_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_mediumd_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=256, + patch_size=16, + embed_dim=512, + depth=20, + num_heads=8, + qkv_fused=True, + qkv_bias=False, + init_values=1e-5, + class_token=False, + num_reg_tokens=1, + use_rot_pos_emb=True, + use_abs_pos_emb=False, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('vit_mediumd_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_betwixt_patch16_rope_reg4_gap_256(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=256, + patch_size=16, + embed_dim=640, + depth=12, + num_heads=10, + qkv_fused=True, + qkv_bias=True, + init_values=1e-5, + class_token=False, + num_reg_tokens=4, + use_rot_pos_emb=True, + use_abs_pos_emb=False, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('vit_betwixt_patch16_rope_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: + model_args = dict( + img_size=256, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + qkv_fused=True, + qkv_bias=True, + init_values=1e-5, + class_token=False, + num_reg_tokens=1, + use_rot_pos_emb=True, + use_abs_pos_emb=False, + ref_feat_shape=(16, 16), # 224/14 + ) + model = _create_eva('vit_base_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/factory.py b/pytorch-image-models/timm/models/factory.py new file mode 100644 index 
0000000000000000000000000000000000000000..da94e1ac19afe9dcdc783698cc4c3616ee43f805 --- /dev/null +++ b/pytorch-image-models/timm/models/factory.py @@ -0,0 +1,4 @@ +from ._factory import * + +import warnings +warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning) diff --git a/pytorch-image-models/timm/models/fastvit.py b/pytorch-image-models/timm/models/fastvit.py new file mode 100644 index 0000000000000000000000000000000000000000..ec0d064efca423b04eff6681704abf3dc0474892 --- /dev/null +++ b/pytorch-image-models/timm/models/fastvit.py @@ -0,0 +1,1634 @@ +# FastViT for PyTorch +# +# Original implementation and weights from https://github.com/apple/ml-fastvit +# +# For licensing see accompanying LICENSE file at https://github.com/apple/ml-fastvit/tree/main +# Original work is copyright (C) 2023 Apple Inc. All Rights Reserved. +# +import os +from functools import partial +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, trunc_normal_, create_conv2d, ConvNormAct, SqueezeExcite, use_fused_attn, \ + ClassifierHead +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['FastVit'] + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +class MobileOneBlock(nn.Module): + """MobileOne building block. + + This block has a multi-branched architecture at train-time + and plain-CNN style architecture at inference time + For more details, please refer to our paper: + `An Improved One millisecond Mobile Backbone` - + https://arxiv.org/pdf/2206.04040.pdf + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + group_size: int = 0, + inference_mode: bool = False, + use_se: bool = False, + use_act: bool = True, + use_scale_branch: bool = True, + num_conv_branches: int = 1, + act_layer: nn.Module = nn.GELU, + ) -> None: + """Construct a MobileOneBlock module. + + Args: + in_chs: Number of channels in the input. + out_chs: Number of channels produced by the block. + kernel_size: Size of the convolution kernel. + stride: Stride size. + dilation: Kernel dilation factor. + group_size: Convolution group size. + inference_mode: If True, instantiates model in inference mode. + use_se: Whether to use SE-ReLU activations. + use_act: Whether to use activation. Default: ``True`` + use_scale_branch: Whether to use scale branch. Default: ``True`` + num_conv_branches: Number of linear conv branches. 
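+
+ Example (illustrative sketch; channel and spatial sizes are arbitrary):
+ >>> import torch
+ >>> block = MobileOneBlock(in_chs=64, out_chs=64, kernel_size=3, group_size=1)
+ >>> y = block(torch.randn(1, 64, 56, 56)) # multi-branch training-time path
+ >>> block.reparameterize() # fuse branches into a single conv for inference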
+ """ + super(MobileOneBlock, self).__init__() + self.inference_mode = inference_mode + self.groups = num_groups(group_size, in_chs) + self.stride = stride + self.dilation = dilation + self.kernel_size = kernel_size + self.in_chs = in_chs + self.out_chs = out_chs + self.num_conv_branches = num_conv_branches + + # Check if SE-ReLU is requested + self.se = SqueezeExcite(out_chs, rd_divisor=1) if use_se else nn.Identity() + + if inference_mode: + self.reparam_conv = create_conv2d( + in_chs, + out_chs, + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + groups=self.groups, + bias=True, + ) + else: + # Re-parameterizable skip connection + self.reparam_conv = None + + self.identity = ( + nn.BatchNorm2d(num_features=in_chs) + if out_chs == in_chs and stride == 1 + else None + ) + + # Re-parameterizable conv branches + if num_conv_branches > 0: + self.conv_kxk = nn.ModuleList([ + ConvNormAct( + self.in_chs, + self.out_chs, + kernel_size=kernel_size, + stride=self.stride, + groups=self.groups, + apply_act=False, + ) for _ in range(self.num_conv_branches) + ]) + else: + self.conv_kxk = None + + # Re-parameterizable scale branch + self.conv_scale = None + if kernel_size > 1 and use_scale_branch: + self.conv_scale = ConvNormAct( + self.in_chs, + self.out_chs, + kernel_size=1, + stride=self.stride, + groups=self.groups, + apply_act=False + ) + + self.act = act_layer() if use_act else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Apply forward pass.""" + # Inference mode forward pass. + if self.reparam_conv is not None: + return self.act(self.se(self.reparam_conv(x))) + + # Multi-branched train-time forward pass. + # Identity branch output + identity_out = 0 + if self.identity is not None: + identity_out = self.identity(x) + + # Scale branch output + scale_out = 0 + if self.conv_scale is not None: + scale_out = self.conv_scale(x) + + # Other kxk conv branches + out = scale_out + identity_out + if self.conv_kxk is not None: + for rc in self.conv_kxk: + out += rc(x) + + return self.act(self.se(out)) + + def reparameterize(self): + """Following works like `RepVGG: Making VGG-style ConvNets Great Again` - + https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched + architecture used at training time to obtain a plain CNN-like structure + for inference. + """ + if self.reparam_conv is not None: + return + + kernel, bias = self._get_kernel_bias() + self.reparam_conv = create_conv2d( + in_channels=self.in_chs, + out_channels=self.out_chs, + kernel_size=self.kernel_size, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + bias=True, + ) + self.reparam_conv.weight.data = kernel + self.reparam_conv.bias.data = bias + + # Delete un-used branches + for name, para in self.named_parameters(): + if 'reparam_conv' in name: + continue + para.detach_() + + self.__delattr__("conv_kxk") + self.__delattr__("conv_scale") + if hasattr(self, "identity"): + self.__delattr__("identity") + + self.inference_mode = True + + def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: + """Method to obtain re-parameterized kernel and bias. + Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 + + Returns: + Tuple of (kernel, bias) after fusing branches. + """ + # get weights and bias of scale branch + kernel_scale = 0 + bias_scale = 0 + if self.conv_scale is not None: + kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale) + # Pad scale branch kernel to match conv branch kernel size. 
+ pad = self.kernel_size // 2 + kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) + + # get weights and bias of skip branch + kernel_identity = 0 + bias_identity = 0 + if self.identity is not None: + kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) + + # get weights and bias of conv branches + kernel_conv = 0 + bias_conv = 0 + if self.conv_kxk is not None: + for ix in range(self.num_conv_branches): + _kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix]) + kernel_conv += _kernel + bias_conv += _bias + + kernel_final = kernel_conv + kernel_scale + kernel_identity + bias_final = bias_conv + bias_scale + bias_identity + return kernel_final, bias_final + + def _fuse_bn_tensor( + self, branch: Union[nn.Sequential, nn.BatchNorm2d] + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Method to fuse batchnorm layer with preceeding conv layer. + Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 + + Args: + branch: Sequence of ops to be fused. + + Returns: + Tuple of (kernel, bias) after fusing batchnorm. + """ + if isinstance(branch, ConvNormAct): + kernel = branch.conv.weight + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + assert isinstance(branch, nn.BatchNorm2d) + if not hasattr(self, "id_tensor"): + input_dim = self.in_chs // self.groups + kernel_value = torch.zeros( + (self.in_chs, input_dim, self.kernel_size, self.kernel_size), + dtype=branch.weight.dtype, + device=branch.weight.device, + ) + for i in range(self.in_chs): + kernel_value[ + i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2 + ] = 1 + self.id_tensor = kernel_value + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + +class ReparamLargeKernelConv(nn.Module): + """Building Block of RepLKNet + + This class defines overparameterized large kernel conv block + introduced in `RepLKNet `_ + + Reference: https://github.com/DingXiaoH/RepLKNet-pytorch + """ + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int, + stride: int, + group_size: int, + small_kernel: Optional[int] = None, + use_se: bool = False, + act_layer: Optional[nn.Module] = None, + inference_mode: bool = False, + ) -> None: + """Construct a ReparamLargeKernelConv module. + + Args: + in_chs: Number of input channels. + out_chs: Number of output channels. + kernel_size: Kernel size of the large kernel conv branch. + stride: Stride size. Default: 1 + group_size: Group size. Default: 1 + small_kernel: Kernel size of small kernel conv branch. + act_layer: Activation module. Default: ``nn.GELU`` + inference_mode: If True, instantiates model in inference mode. 
Default: ``False`` + """ + super(ReparamLargeKernelConv, self).__init__() + self.stride = stride + self.groups = num_groups(group_size, in_chs) + self.in_chs = in_chs + self.out_chs = out_chs + + self.kernel_size = kernel_size + self.small_kernel = small_kernel + if inference_mode: + self.reparam_conv = create_conv2d( + in_chs, + out_chs, + kernel_size=kernel_size, + stride=stride, + dilation=1, + groups=self.groups, + bias=True, + ) + else: + self.reparam_conv = None + self.large_conv = ConvNormAct( + in_chs, + out_chs, + kernel_size=kernel_size, + stride=self.stride, + groups=self.groups, + apply_act=False, + ) + if small_kernel is not None: + assert ( + small_kernel <= kernel_size + ), "The kernel size for re-param cannot be larger than the large kernel!" + self.small_conv = ConvNormAct( + in_chs, + out_chs, + kernel_size=small_kernel, + stride=self.stride, + groups=self.groups, + apply_act=False, + ) + self.se = SqueezeExcite(out_chs, rd_ratio=0.25) if use_se else nn.Identity() + # FIXME output of this act was not used in original impl, likely due to bug + self.act = act_layer() if act_layer is not None else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.reparam_conv is not None: + out = self.reparam_conv(x) + else: + out = self.large_conv(x) + if self.small_conv is not None: + out = out + self.small_conv(x) + out = self.se(out) + out = self.act(out) + return out + + def get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: + """Method to obtain re-parameterized kernel and bias. + Reference: https://github.com/DingXiaoH/RepLKNet-pytorch + + Returns: + Tuple of (kernel, bias) after fusing branches. + """ + eq_k, eq_b = self._fuse_bn(self.large_conv.conv, self.large_conv.bn) + if hasattr(self, "small_conv"): + small_k, small_b = self._fuse_bn(self.small_conv.conv, self.small_conv.bn) + eq_b += small_b + eq_k += nn.functional.pad( + small_k, [(self.kernel_size - self.small_kernel) // 2] * 4 + ) + return eq_k, eq_b + + def reparameterize(self) -> None: + """ + Following works like `RepVGG: Making VGG-style ConvNets Great Again` - + https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched + architecture used at training time to obtain a plain CNN-like structure + for inference. + """ + eq_k, eq_b = self.get_kernel_bias() + self.reparam_conv = create_conv2d( + self.in_chs, + self.out_chs, + kernel_size=self.kernel_size, + stride=self.stride, + groups=self.groups, + bias=True, + ) + + self.reparam_conv.weight.data = eq_k + self.reparam_conv.bias.data = eq_b + self.__delattr__("large_conv") + if hasattr(self, "small_conv"): + self.__delattr__("small_conv") + + @staticmethod + def _fuse_bn( + conv: nn.Conv2d, bn: nn.BatchNorm2d + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Method to fuse batchnorm layer with conv layer. + + Args: + conv: Convolutional kernel weights. + bn: Batchnorm 2d layer. + + Returns: + Tuple of (kernel, bias) after fusing batchnorm. + """ + kernel = conv.weight + running_mean = bn.running_mean + running_var = bn.running_var + gamma = bn.weight + beta = bn.bias + eps = bn.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + +def convolutional_stem( + in_chs: int, + out_chs: int, + act_layer: nn.Module = nn.GELU, + inference_mode: bool = False +) -> nn.Sequential: + """Build convolutional stem with MobileOne blocks. + + Args: + in_chs: Number of input channels. + out_chs: Number of output channels. 
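+ act_layer: Activation layer used in the stem blocks. Default: ``nn.GELU``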
+ inference_mode: Flag to instantiate model in inference mode. Default: ``False`` + + Returns: + nn.Sequential object with stem elements. + """ + return nn.Sequential( + MobileOneBlock( + in_chs=in_chs, + out_chs=out_chs, + kernel_size=3, + stride=2, + act_layer=act_layer, + inference_mode=inference_mode, + ), + MobileOneBlock( + in_chs=out_chs, + out_chs=out_chs, + kernel_size=3, + stride=2, + group_size=1, + act_layer=act_layer, + inference_mode=inference_mode, + ), + MobileOneBlock( + in_chs=out_chs, + out_chs=out_chs, + kernel_size=1, + stride=1, + act_layer=act_layer, + inference_mode=inference_mode, + ), + ) + + +class Attention(nn.Module): + """Multi-headed Self Attention module. + + Source modified from: + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + """ + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim: int, + head_dim: int = 32, + qkv_bias: bool = False, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ) -> None: + """Build MHSA module that can handle 3D or 4D input tensors. + + Args: + dim: Number of embedding dimensions. + head_dim: Number of hidden dimensions per head. Default: ``32`` + qkv_bias: Use bias or not. Default: ``False`` + attn_drop: Dropout rate for attention tensor. + proj_drop: Dropout rate for projection tensor. + """ + super().__init__() + assert dim % head_dim == 0, "dim should be divisible by head_dim" + self.head_dim = head_dim + self.num_heads = dim // head_dim + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, C, H, W = x.shape + N = H * W + x = x.flatten(2).transpose(-2, -1) # (B, N, C) + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, self.head_dim) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + if self.fused_attn: + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + x = x.transpose(-2, -1).reshape(B, C, H, W) + + return x + + +class PatchEmbed(nn.Module): + """Convolutional patch embedding layer.""" + + def __init__( + self, + patch_size: int, + stride: int, + in_chs: int, + embed_dim: int, + act_layer: nn.Module = nn.GELU, + lkc_use_act: bool = False, + use_se: bool = False, + inference_mode: bool = False, + ) -> None: + """Build patch embedding layer. + + Args: + patch_size: Patch size for embedding computation. + stride: Stride for convolutional embedding layer. + in_chs: Number of channels of input tensor. + embed_dim: Number of embedding dimensions. + inference_mode: Flag to instantiate model in inference mode. 
Default: ``False`` + """ + super().__init__() + self.proj = nn.Sequential( + ReparamLargeKernelConv( + in_chs=in_chs, + out_chs=embed_dim, + kernel_size=patch_size, + stride=stride, + group_size=1, + small_kernel=3, + use_se=use_se, + act_layer=act_layer if lkc_use_act else None, # NOTE original weights didn't use this act + inference_mode=inference_mode, + ), + MobileOneBlock( + in_chs=embed_dim, + out_chs=embed_dim, + kernel_size=1, + stride=1, + use_se=False, + act_layer=act_layer, + inference_mode=inference_mode, + ) + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + return x + + +class LayerScale2d(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim, 1, 1)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class RepMixer(nn.Module): + """Reparameterizable token mixer. + + For more details, please refer to our paper: + `FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization `_ + """ + + def __init__( + self, + dim, + kernel_size=3, + layer_scale_init_value=1e-5, + inference_mode: bool = False, + ): + """Build RepMixer Module. + + Args: + dim: Input feature map dimension. :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, H, W)`. + kernel_size: Kernel size for spatial mixing. Default: 3 + layer_scale_init_value: Initial value for layer scale. Default: 1e-5 + inference_mode: If True, instantiates model in inference mode. Default: ``False`` + """ + super().__init__() + self.dim = dim + self.kernel_size = kernel_size + self.inference_mode = inference_mode + + if inference_mode: + self.reparam_conv = nn.Conv2d( + self.dim, + self.dim, + kernel_size=self.kernel_size, + stride=1, + padding=self.kernel_size // 2, + groups=self.dim, + bias=True, + ) + else: + self.reparam_conv = None + self.norm = MobileOneBlock( + dim, + dim, + kernel_size, + group_size=1, + use_act=False, + use_scale_branch=False, + num_conv_branches=0, + ) + self.mixer = MobileOneBlock( + dim, + dim, + kernel_size, + group_size=1, + use_act=False, + ) + if layer_scale_init_value is not None: + self.layer_scale = LayerScale2d(dim, layer_scale_init_value) + else: + self.layer_scale = nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.reparam_conv is not None: + x = self.reparam_conv(x) + else: + x = x + self.layer_scale(self.mixer(x) - self.norm(x)) + return x + + def reparameterize(self) -> None: + """Reparameterize mixer and norm into a single + convolutional layer for efficient inference. 
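+        The fused weight is ``mixer.id_tensor + layer_scale * (mixer_weight - norm_weight)``, so the training-time ``x + layer_scale * (mixer(x) - norm(x))`` collapses into a single depthwise convolution.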
+ """ + if self.inference_mode: + return + + self.mixer.reparameterize() + self.norm.reparameterize() + + if isinstance(self.layer_scale, LayerScale2d): + w = self.mixer.id_tensor + self.layer_scale.gamma.unsqueeze(-1) * ( + self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight + ) + b = torch.squeeze(self.layer_scale.gamma) * ( + self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias + ) + else: + w = ( + self.mixer.id_tensor + + self.mixer.reparam_conv.weight + - self.norm.reparam_conv.weight + ) + b = self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias + + self.reparam_conv = create_conv2d( + self.dim, + self.dim, + kernel_size=self.kernel_size, + stride=1, + groups=self.dim, + bias=True, + ) + self.reparam_conv.weight.data = w + self.reparam_conv.bias.data = b + + for name, para in self.named_parameters(): + if 'reparam_conv' in name: + continue + para.detach_() + self.__delattr__("mixer") + self.__delattr__("norm") + self.__delattr__("layer_scale") + + +class ConvMlp(nn.Module): + """Convolutional FFN Module.""" + + def __init__( + self, + in_chs: int, + hidden_channels: Optional[int] = None, + out_chs: Optional[int] = None, + act_layer: nn.Module = nn.GELU, + drop: float = 0.0, + ) -> None: + """Build convolutional FFN module. + + Args: + in_chs: Number of input channels. + hidden_channels: Number of channels after expansion. Default: None + out_chs: Number of output channels. Default: None + act_layer: Activation layer. Default: ``GELU`` + drop: Dropout rate. Default: ``0.0``. + """ + super().__init__() + out_chs = out_chs or in_chs + hidden_channels = hidden_channels or in_chs + self.conv = ConvNormAct( + in_chs, + out_chs, + kernel_size=7, + groups=in_chs, + apply_act=False, + ) + self.fc1 = nn.Conv2d(in_chs, hidden_channels, kernel_size=1) + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_channels, out_chs, kernel_size=1) + self.drop = nn.Dropout(drop) + self.apply(self._init_weights) + + def _init_weights(self, m: nn.Module) -> None: + if isinstance(m, nn.Conv2d): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv(x) + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class RepConditionalPosEnc(nn.Module): + """Implementation of conditional positional encoding. + + For more details refer to paper: + `Conditional Positional Encodings for Vision Transformers `_ + + In our implementation, we can reparameterize this module to eliminate a skip connection. + """ + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + spatial_shape: Union[int, Tuple[int, int]] = (7, 7), + inference_mode=False, + ) -> None: + """Build reparameterizable conditional positional encoding + + Args: + dim: Number of input channels. + dim_out: Number of embedding dimensions. Default: 768 + spatial_shape: Spatial shape of kernel for positional encoding. Default: (7, 7) + inference_mode: Flag to instantiate block in inference mode. Default: ``False`` + """ + super(RepConditionalPosEnc, self).__init__() + if isinstance(spatial_shape, int): + spatial_shape = tuple([spatial_shape] * 2) + assert isinstance(spatial_shape, Tuple), ( + f'"spatial_shape" must by a sequence or int, ' + f"get {type(spatial_shape)} instead." + ) + assert len(spatial_shape) == 2, ( + f'Length of "spatial_shape" should be 2, ' + f"got {len(spatial_shape)} instead." 
+ ) + + self.spatial_shape = spatial_shape + self.dim = dim + self.dim_out = dim_out or dim + self.groups = dim + + if inference_mode: + self.reparam_conv = nn.Conv2d( + self.dim, + self.dim_out, + kernel_size=self.spatial_shape, + stride=1, + padding=spatial_shape[0] // 2, + groups=self.groups, + bias=True, + ) + else: + self.reparam_conv = None + self.pos_enc = nn.Conv2d( + self.dim, + self.dim_out, + spatial_shape, + 1, + int(spatial_shape[0] // 2), + groups=self.groups, + bias=True, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.reparam_conv is not None: + x = self.reparam_conv(x) + else: + x = self.pos_enc(x) + x + return x + + def reparameterize(self) -> None: + # Build equivalent Id tensor + input_dim = self.dim // self.groups + kernel_value = torch.zeros( + ( + self.dim, + input_dim, + self.spatial_shape[0], + self.spatial_shape[1], + ), + dtype=self.pos_enc.weight.dtype, + device=self.pos_enc.weight.device, + ) + for i in range(self.dim): + kernel_value[ + i, + i % input_dim, + self.spatial_shape[0] // 2, + self.spatial_shape[1] // 2, + ] = 1 + id_tensor = kernel_value + + # Reparameterize Id tensor and conv + w_final = id_tensor + self.pos_enc.weight + b_final = self.pos_enc.bias + + # Introduce reparam conv + self.reparam_conv = nn.Conv2d( + self.dim, + self.dim_out, + kernel_size=self.spatial_shape, + stride=1, + padding=int(self.spatial_shape[0] // 2), + groups=self.groups, + bias=True, + ) + self.reparam_conv.weight.data = w_final + self.reparam_conv.bias.data = b_final + + for name, para in self.named_parameters(): + if 'reparam_conv' in name: + continue + para.detach_() + self.__delattr__("pos_enc") + + +class RepMixerBlock(nn.Module): + """Implementation of Metaformer block with RepMixer as token mixer. + + For more details on Metaformer structure, please refer to: + `MetaFormer Is Actually What You Need for Vision `_ + """ + + def __init__( + self, + dim: int, + kernel_size: int = 3, + mlp_ratio: float = 4.0, + act_layer: nn.Module = nn.GELU, + proj_drop: float = 0.0, + drop_path: float = 0.0, + layer_scale_init_value: float = 1e-5, + inference_mode: bool = False, + ): + """Build RepMixer Block. + + Args: + dim: Number of embedding dimensions. + kernel_size: Kernel size for repmixer. Default: 3 + mlp_ratio: MLP expansion ratio. Default: 4.0 + act_layer: Activation layer. Default: ``nn.GELU`` + proj_drop: Dropout rate. Default: 0.0 + drop_path: Drop path rate. Default: 0.0 + layer_scale_init_value: Layer scale value at initialization. Default: 1e-5 + inference_mode: Flag to instantiate block in inference mode. Default: ``False`` + """ + + super().__init__() + + self.token_mixer = RepMixer( + dim, + kernel_size=kernel_size, + layer_scale_init_value=layer_scale_init_value, + inference_mode=inference_mode, + ) + + self.mlp = ConvMlp( + in_chs=dim, + hidden_channels=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + if layer_scale_init_value is not None: + self.layer_scale = LayerScale2d(dim, layer_scale_init_value) + else: + self.layer_scale = nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + x = self.token_mixer(x) + x = x + self.drop_path(self.layer_scale(self.mlp(x))) + return x + + +class AttentionBlock(nn.Module): + """Implementation of metaformer block with MHSA as token mixer. 
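+    Unlike ``RepMixerBlock`` there is no reparameterized inference path here; the token mixer is a norm layer (``nn.BatchNorm2d`` by default) followed by multi-headed self-attention, with optional layer scale and drop path on both branches.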
+ + For more details on Metaformer structure, please refer to: + `MetaFormer Is Actually What You Need for Vision `_ + """ + + def __init__( + self, + dim: int, + mlp_ratio: float = 4.0, + act_layer: nn.Module = nn.GELU, + norm_layer: nn.Module = nn.BatchNorm2d, + proj_drop: float = 0.0, + drop_path: float = 0.0, + layer_scale_init_value: float = 1e-5, + ): + """Build Attention Block. + + Args: + dim: Number of embedding dimensions. + mlp_ratio: MLP expansion ratio. Default: 4.0 + act_layer: Activation layer. Default: ``nn.GELU`` + norm_layer: Normalization layer. Default: ``nn.BatchNorm2d`` + proj_drop: Dropout rate. Default: 0.0 + drop_path: Drop path rate. Default: 0.0 + layer_scale_init_value: Layer scale value at initialization. Default: 1e-5 + """ + + super().__init__() + + self.norm = norm_layer(dim) + self.token_mixer = Attention(dim=dim) + if layer_scale_init_value is not None: + self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value) + else: + self.layer_scale_1 = nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.mlp = ConvMlp( + in_chs=dim, + hidden_channels=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + if layer_scale_init_value is not None: + self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value) + else: + self.layer_scale_2 = nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.layer_scale_1(self.token_mixer(self.norm(x)))) + x = x + self.drop_path2(self.layer_scale_2(self.mlp(x))) + return x + + +class FastVitStage(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + depth: int, + token_mixer_type: str, + downsample: bool = True, + se_downsample: bool = False, + down_patch_size: int = 7, + down_stride: int = 2, + pos_emb_layer: Optional[nn.Module] = None, + kernel_size: int = 3, + mlp_ratio: float = 4.0, + act_layer: nn.Module = nn.GELU, + norm_layer: nn.Module = nn.BatchNorm2d, + proj_drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + layer_scale_init_value: Optional[float] = 1e-5, + lkc_use_act=False, + inference_mode=False, + ): + """FastViT stage. + + Args: + dim: Number of embedding dimensions. + depth: Number of blocks in stage + token_mixer_type: Token mixer type. + kernel_size: Kernel size for repmixer. + mlp_ratio: MLP expansion ratio. + act_layer: Activation layer. + norm_layer: Normalization layer. + proj_drop_rate: Dropout rate. + drop_path_rate: Drop path rate. + layer_scale_init_value: Layer scale value at initialization. + inference_mode: Flag to instantiate block in inference mode. 
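+            dim_out: Number of output embedding dimensions after the optional downsample. +            downsample: If True, prepend a ``PatchEmbed`` downsampling layer. +            pos_emb_layer: Optional positional encoding layer applied after downsampling. +            lkc_use_act: Apply the activation inside the large-kernel conv of the downsample layer.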
+ """ + super().__init__() + self.grad_checkpointing = False + + if downsample: + self.downsample = PatchEmbed( + patch_size=down_patch_size, + stride=down_stride, + in_chs=dim, + embed_dim=dim_out, + use_se=se_downsample, + act_layer=act_layer, + lkc_use_act=lkc_use_act, + inference_mode=inference_mode, + ) + else: + assert dim == dim_out + self.downsample = nn.Identity() + + if pos_emb_layer is not None: + self.pos_emb = pos_emb_layer(dim_out, inference_mode=inference_mode) + else: + self.pos_emb = nn.Identity() + + blocks = [] + for block_idx in range(depth): + if token_mixer_type == "repmixer": + blocks.append(RepMixerBlock( + dim_out, + kernel_size=kernel_size, + mlp_ratio=mlp_ratio, + act_layer=act_layer, + proj_drop=proj_drop_rate, + drop_path=drop_path_rate[block_idx], + layer_scale_init_value=layer_scale_init_value, + inference_mode=inference_mode, + )) + elif token_mixer_type == "attention": + blocks.append(AttentionBlock( + dim_out, + mlp_ratio=mlp_ratio, + act_layer=act_layer, + norm_layer=norm_layer, + proj_drop=proj_drop_rate, + drop_path=drop_path_rate[block_idx], + layer_scale_init_value=layer_scale_init_value, + )) + else: + raise ValueError( + "Token mixer type: {} not supported".format(token_mixer_type) + ) + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + x = self.pos_emb(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class FastVit(nn.Module): + fork_feat: torch.jit.Final[bool] + + """ + This class implements `FastViT architecture `_ + """ + + def __init__( + self, + in_chans: int = 3, + layers: Tuple[int, ...] = (2, 2, 6, 2), + token_mixers: Tuple[str, ...] = ("repmixer", "repmixer", "repmixer", "repmixer"), + embed_dims: Tuple[int, ...] = (64, 128, 256, 512), + mlp_ratios: Tuple[float, ...] = (4,) * 4, + downsamples: Tuple[bool, ...] = (False, True, True, True), + se_downsamples: Tuple[bool, ...] = (False, False, False, False), + repmixer_kernel_size: int = 3, + num_classes: int = 1000, + pos_embs: Tuple[Optional[nn.Module], ...] 
= (None,) * 4, + down_patch_size: int = 7, + down_stride: int = 2, + drop_rate: float = 0.0, + proj_drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + layer_scale_init_value: float = 1e-5, + lkc_use_act: bool = False, + fork_feat: bool = False, + cls_ratio: float = 2.0, + global_pool: str = 'avg', + norm_layer: nn.Module = nn.BatchNorm2d, + act_layer: nn.Module = nn.GELU, + inference_mode: bool = False, + ) -> None: + super().__init__() + self.num_classes = 0 if fork_feat else num_classes + self.fork_feat = fork_feat + self.global_pool = global_pool + self.feature_info = [] + + # Convolutional stem + self.stem = convolutional_stem( + in_chans, + embed_dims[0], + act_layer, + inference_mode, + ) + + # Build the main stages of the network architecture + prev_dim = embed_dims[0] + scale = 1 + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] + stages = [] + for i in range(len(layers)): + downsample = downsamples[i] or prev_dim != embed_dims[i] + stage = FastVitStage( + dim=prev_dim, + dim_out=embed_dims[i], + depth=layers[i], + downsample=downsample, + se_downsample=se_downsamples[i], + down_patch_size=down_patch_size, + down_stride=down_stride, + pos_emb_layer=pos_embs[i], + token_mixer_type=token_mixers[i], + kernel_size=repmixer_kernel_size, + mlp_ratio=mlp_ratios[i], + act_layer=act_layer, + norm_layer=norm_layer, + proj_drop_rate=proj_drop_rate, + drop_path_rate=dpr[i], + layer_scale_init_value=layer_scale_init_value, + lkc_use_act=lkc_use_act, + inference_mode=inference_mode, + ) + stages.append(stage) + prev_dim = embed_dims[i] + if downsample: + scale *= 2 + self.feature_info += [dict(num_chs=prev_dim, reduction=4 * scale, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + self.num_stages = len(self.stages) + self.num_features = self.head_hidden_size = prev_dim + + # For segmentation and detection, extract intermediate output + if self.fork_feat: + # Add a norm layer for each output. self.stages is slightly different than self.network + # in the original code, the PatchEmbed layer is part of self.stages in this code where + # it was part of self.network in the original code. So we do not need to skip out indices. + self.out_indices = [0, 1, 2, 3] + for i_emb, i_layer in enumerate(self.out_indices): + if i_emb == 0 and os.environ.get("FORK_LAST3", None): + """For RetinaNet, `start_level=1`. The first norm layer will not used. + cmd: `FORK_LAST3=1 python -m torch.distributed.launch ...` + """ + layer = nn.Identity() + else: + layer = norm_layer(embed_dims[i_emb]) + layer_name = f"norm{i_layer}" + self.add_module(layer_name, layer) + else: + # Classifier head + self.num_features = self.head_hidden_size = final_features = int(embed_dims[-1] * cls_ratio) + self.final_conv = MobileOneBlock( + in_chs=embed_dims[-1], + out_chs=final_features, + kernel_size=3, + stride=1, + group_size=1, + inference_mode=inference_mode, + use_se=True, + act_layer=act_layer, + num_conv_branches=1, + ) + self.head = ClassifierHead( + final_features, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + self.apply(self._init_weights) + + def _init_weights(self, m: nn.Module) -> None: + """Init. 
for classification""" + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay(self): + return set() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', # stem and embed + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+).downsample', (0,)), + (r'^stages\.(\d+).pos_emb', (0,)), + (r'^stages\.(\d+)\.\w+\.(\d+)', None), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stages), indices) + + # forward pass + x = self.stem(x) + last_idx = self.num_stages - 1 + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.stages + else: + stages = self.stages[:max_index + 1] + feat_idx = 0 + for feat_idx, stage in enumerate(stages): + x = stage(x) + if feat_idx in take_indices: + intermediates.append(x) + + if intermediates_only: + return intermediates + + if feat_idx == last_idx: + x = self.final_conv(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
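+        Truncates ``self.stages`` to the deepest stage required by ``indices`` and optionally resets the classifier head.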
+ """ + take_indices, max_index = feature_take_indices(len(self.stages), indices) + self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + # input embedding + x = self.stem(x) + outs = [] + for idx, block in enumerate(self.stages): + x = block(x) + if self.fork_feat: + if idx in self.out_indices: + norm_layer = getattr(self, f"norm{idx}") + x_out = norm_layer(x) + outs.append(x_out) + if self.fork_feat: + # output the features of four stages for dense prediction + return outs + x = self.final_conv(x) + return x + + def forward_head(self, x: torch.Tensor, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + if self.fork_feat: + return x + x = self.forward_head(x) + return x + + +def _cfg(url="", **kwargs): + return { + "url": url, + "num_classes": 1000, + "input_size": (3, 256, 256), + "pool_size": (8, 8), + "crop_pct": 0.9, + "interpolation": "bicubic", + "mean": IMAGENET_DEFAULT_MEAN, + "std": IMAGENET_DEFAULT_STD, + 'first_conv': ('stem.0.conv_kxk.0.conv', 'stem.0.conv_scale.conv'), + "classifier": "head.fc", + **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + "fastvit_t8.apple_in1k": _cfg( + hf_hub_id='timm/'), + "fastvit_t12.apple_in1k": _cfg( + hf_hub_id='timm/'), + + "fastvit_s12.apple_in1k": _cfg( + hf_hub_id='timm/'), + "fastvit_sa12.apple_in1k": _cfg( + hf_hub_id='timm/'), + "fastvit_sa24.apple_in1k": _cfg( + hf_hub_id='timm/'), + "fastvit_sa36.apple_in1k": _cfg( + hf_hub_id='timm/'), + + "fastvit_ma36.apple_in1k": _cfg( + hf_hub_id='timm/', + crop_pct=0.95), + + "fastvit_t8.apple_dist_in1k": _cfg( + hf_hub_id='timm/'), + "fastvit_t12.apple_dist_in1k": _cfg( + hf_hub_id='timm/'), + + "fastvit_s12.apple_dist_in1k": _cfg( + hf_hub_id='timm/',), + "fastvit_sa12.apple_dist_in1k": _cfg( + hf_hub_id='timm/',), + "fastvit_sa24.apple_dist_in1k": _cfg( + hf_hub_id='timm/',), + "fastvit_sa36.apple_dist_in1k": _cfg( + hf_hub_id='timm/',), + + "fastvit_ma36.apple_dist_in1k": _cfg( + hf_hub_id='timm/', + crop_pct=0.95 + ), + + "fastvit_mci0.apple_mclip": _cfg( + hf_hub_id='apple/mobileclip_s0_timm', + url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s0.pt', + crop_pct=0.95, + num_classes=512, # CLIP proj dim + mean=(0., 0., 0.), std=(1., 1., 1.) + ), + "fastvit_mci1.apple_mclip": _cfg( + hf_hub_id='apple/mobileclip_s1_timm', + url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s1.pt', + crop_pct=0.95, + num_classes=512, # CLIP proj dim + mean=(0., 0., 0.), std=(1., 1., 1.) + ), + "fastvit_mci2.apple_mclip": _cfg( + hf_hub_id='apple/mobileclip_s2_timm', + url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s2.pt', + crop_pct=0.95, + num_classes=512, # CLIP proj dim + mean=(0., 0., 0.), std=(1., 1., 1.) + ), +}) + + +def checkpoint_filter_fn(state_dict, model): + """ Remap original checkpoints -> timm """ + if 'stem.0.conv_kxk.0.conv.weight' in state_dict: + return state_dict # non-original checkpoint, no remapping needed + + state_dict = state_dict.get('state_dict', state_dict) + if 'image_encoder.model.patch_embed.0.rbr_conv.0.conv.weight' in state_dict: + # remap MobileCLIP checkpoints + prefix = 'image_encoder.model.' 
+ else: + prefix = '' + + import re + import bisect + + # find stage ends by locating downsample layers + stage_ends = [] + for k, v in state_dict.items(): + match = re.match(r'^(.*?)network\.(\d+)\.proj.*', k) + if match: + stage_ends.append(int(match.group(2))) + stage_ends = list(sorted(set(stage_ends))) + + out_dict = {} + for k, v in state_dict.items(): + if prefix: + if prefix not in k: + continue + k = k.replace(prefix, '') + + # remap renamed layers + k = k.replace('patch_embed', 'stem') + k = k.replace('rbr_conv', 'conv_kxk') + k = k.replace('rbr_scale', 'conv_scale') + k = k.replace('rbr_skip', 'identity') + k = k.replace('conv_exp', 'final_conv') # to match byobnet, regnet, nfnet + k = k.replace('lkb_origin', 'large_conv') + k = k.replace('convffn', 'mlp') + k = k.replace('se.reduce', 'se.fc1') + k = k.replace('se.expand', 'se.fc2') + k = re.sub(r'layer_scale_([0-9])', r'layer_scale_\1.gamma', k) + if k.endswith('layer_scale'): + k = k.replace('layer_scale', 'layer_scale.gamma') + k = k.replace('dist_head', 'head_dist') + if k.startswith('head.'): + if k == 'head.proj' and hasattr(model.head, 'fc') and isinstance(model.head.fc, nn.Linear): + # if CLIP projection, map to head.fc w/ bias = zeros + k = k.replace('head.proj', 'head.fc.weight') + v = v.T + out_dict['head.fc.bias'] = torch.zeros(v.shape[0]) + else: + k = k.replace('head.', 'head.fc.') + + # remap flat sequential network to stages + match = re.match(r'^network\.(\d+)', k) + stage_idx, net_idx = None, None + if match: + net_idx = int(match.group(1)) + stage_idx = bisect.bisect_right(stage_ends, net_idx) + if stage_idx is not None: + net_prefix = f'network.{net_idx}' + stage_prefix = f'stages.{stage_idx}' + if net_prefix + '.proj' in k: + k = k.replace(net_prefix + '.proj', stage_prefix + '.downsample.proj') + elif net_prefix + '.pe' in k: + k = k.replace(net_prefix + '.pe', stage_prefix + '.pos_emb.pos_enc') + else: + k = k.replace(net_prefix, stage_prefix + '.blocks') + + out_dict[k] = v + return out_dict + + +def _create_fastvit(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) + model = build_model_with_cfg( + FastVit, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs + ) + return model + + +@register_model +def fastvit_t8(pretrained=False, **kwargs): + """Instantiate FastViT-T8 model variant.""" + model_args = dict( + layers=(2, 2, 4, 2), + embed_dims=(48, 96, 192, 384), + mlp_ratios=(3, 3, 3, 3), + token_mixers=("repmixer", "repmixer", "repmixer", "repmixer") + ) + return _create_fastvit('fastvit_t8', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_t12(pretrained=False, **kwargs): + """Instantiate FastViT-T12 model variant.""" + model_args = dict( + layers=(2, 2, 6, 2), + embed_dims=(64, 128, 256, 512), + mlp_ratios=(3, 3, 3, 3), + token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"), + ) + return _create_fastvit('fastvit_t12', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_s12(pretrained=False, **kwargs): + """Instantiate FastViT-S12 model variant.""" + model_args = dict( + layers=(2, 2, 6, 2), + embed_dims=(64, 128, 256, 512), + mlp_ratios=(4, 4, 4, 4), + token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"), + ) + return _create_fastvit('fastvit_s12', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_sa12(pretrained=False, **kwargs): + 
"""Instantiate FastViT-SA12 model variant.""" + model_args = dict( + layers=(2, 2, 6, 2), + embed_dims=(64, 128, 256, 512), + mlp_ratios=(4, 4, 4, 4), + pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), + token_mixers=("repmixer", "repmixer", "repmixer", "attention"), + ) + return _create_fastvit('fastvit_sa12', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_sa24(pretrained=False, **kwargs): + """Instantiate FastViT-SA24 model variant.""" + model_args = dict( + layers=(4, 4, 12, 4), + embed_dims=(64, 128, 256, 512), + mlp_ratios=(4, 4, 4, 4), + pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), + token_mixers=("repmixer", "repmixer", "repmixer", "attention"), + ) + return _create_fastvit('fastvit_sa24', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_sa36(pretrained=False, **kwargs): + """Instantiate FastViT-SA36 model variant.""" + model_args = dict( + layers=(6, 6, 18, 6), + embed_dims=(64, 128, 256, 512), + mlp_ratios=(4, 4, 4, 4), + pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), + token_mixers=("repmixer", "repmixer", "repmixer", "attention"), + ) + return _create_fastvit('fastvit_sa36', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_ma36(pretrained=False, **kwargs): + """Instantiate FastViT-MA36 model variant.""" + model_args = dict( + layers=(6, 6, 18, 6), + embed_dims=(76, 152, 304, 608), + mlp_ratios=(4, 4, 4, 4), + pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), + token_mixers=("repmixer", "repmixer", "repmixer", "attention") + ) + return _create_fastvit('fastvit_ma36', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_mci0(pretrained=False, **kwargs): + """Instantiate MCi0 model variant.""" + model_args = dict( + layers=(2, 6, 10, 2), + embed_dims=(64, 128, 256, 512), + mlp_ratios=(3, 3, 3, 3), + se_downsamples=(False, False, True, True), + pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), + token_mixers=("repmixer", "repmixer", "repmixer", "attention"), + lkc_use_act=True, + ) + return _create_fastvit('fastvit_mci0', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_mci1(pretrained=False, **kwargs): + """Instantiate MCi1 model variant.""" + model_args = dict( + layers=(4, 12, 20, 4), + embed_dims=(64, 128, 256, 512), + mlp_ratios=(3, 3, 3, 3), + se_downsamples=(False, False, True, True), + pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), + token_mixers=("repmixer", "repmixer", "repmixer", "attention"), + lkc_use_act=True, + ) + return _create_fastvit('fastvit_mci1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def fastvit_mci2(pretrained=False, **kwargs): + """Instantiate MCi2 model variant.""" + model_args = dict( + layers=(4, 12, 24, 4), + embed_dims=(80, 160, 320, 640), + mlp_ratios=(3, 3, 3, 3), + se_downsamples=(False, False, True, True), + pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), + token_mixers=("repmixer", "repmixer", "repmixer", "attention"), + lkc_use_act=True, + ) + return _create_fastvit('fastvit_mci2', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/features.py b/pytorch-image-models/timm/models/features.py new file mode 100644 index 
0000000000000000000000000000000000000000..f937e1626e4e83d2776bfbabb8f96af802f55802 --- /dev/null +++ b/pytorch-image-models/timm/models/features.py @@ -0,0 +1,4 @@ +from ._features import * + +import warnings +warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning) diff --git a/pytorch-image-models/timm/models/hrnet.py b/pytorch-image-models/timm/models/hrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..75b157d67d0cdd2efacc78c43cf0e498168a24b4 --- /dev/null +++ b/pytorch-image-models/timm/models/hrnet.py @@ -0,0 +1,979 @@ +""" HRNet + +Copied from https://github.com/HRNet/HRNet-Image-Classification + +Original header: + Copyright (c) Microsoft + Licensed under the MIT License. + Written by Bin Xiao (Bin.Xiao@microsoft.com) + Modified by Ke Sun (sunk@mail.ustc.edu.cn) +""" +import logging +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import create_classifier +from ._builder import build_model_with_cfg, pretrained_cfg_for_features +from ._features import FeatureInfo +from ._registry import register_model, generate_default_cfgs +from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE + +__all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this + +_BN_MOMENTUM = 0.1 +_logger = logging.getLogger(__name__) + + +cfg_cls = dict( + hrnet_w18_small=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(1,), + num_channels=(32,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(2, 2), + num_channels=(16, 32), + fuse_method='SUM' + ), + stage3=dict( + num_modules=1, + num_branches=3, + block_type='BASIC', + num_blocks=(2, 2, 2), + num_channels=(16, 32, 64), + fuse_method='SUM' + ), + stage4=dict( + num_modules=1, + num_branches=4, + block_type='BASIC', + num_blocks=(2, 2, 2, 2), + num_channels=(16, 32, 64, 128), + fuse_method='SUM', + ), + ), + + hrnet_w18_small_v2=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(2,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(2, 2), + num_channels=(18, 36), + fuse_method='SUM' + ), + stage3=dict( + num_modules=3, + num_branches=3, + block_type='BASIC', + num_blocks=(2, 2, 2), + num_channels=(18, 36, 72), + fuse_method='SUM' + ), + stage4=dict( + num_modules=2, + num_branches=4, + block_type='BASIC', + num_blocks=(2, 2, 2, 2), + num_channels=(18, 36, 72, 144), + fuse_method='SUM', + ), + ), + + hrnet_w18=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36), + fuse_method='SUM' + ), + stage3=dict( + num_modules=4, + num_branches=3, + block_type='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72), + fuse_method='SUM' + ), + stage4=dict( + num_modules=3, + num_branches=4, + block_type='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144), + fuse_method='SUM', + ), + ), + + hrnet_w30=dict( + stem_width=64, + stage1=dict( + 
num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(4, 4), + num_channels=(30, 60), + fuse_method='SUM' + ), + stage3=dict( + num_modules=4, + num_branches=3, + block_type='BASIC', + num_blocks=(4, 4, 4), + num_channels=(30, 60, 120), + fuse_method='SUM' + ), + stage4=dict( + num_modules=3, + num_branches=4, + block_type='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(30, 60, 120, 240), + fuse_method='SUM', + ), + ), + + hrnet_w32=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64), + fuse_method='SUM' + ), + stage3=dict( + num_modules=4, + num_branches=3, + block_type='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128), + fuse_method='SUM' + ), + stage4=dict( + num_modules=3, + num_branches=4, + block_type='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256), + fuse_method='SUM', + ), + ), + + hrnet_w40=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(4, 4), + num_channels=(40, 80), + fuse_method='SUM' + ), + stage3=dict( + num_modules=4, + num_branches=3, + block_type='BASIC', + num_blocks=(4, 4, 4), + num_channels=(40, 80, 160), + fuse_method='SUM' + ), + stage4=dict( + num_modules=3, + num_branches=4, + block_type='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(40, 80, 160, 320), + fuse_method='SUM', + ), + ), + + hrnet_w44=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(4, 4), + num_channels=(44, 88), + fuse_method='SUM' + ), + stage3=dict( + num_modules=4, + num_branches=3, + block_type='BASIC', + num_blocks=(4, 4, 4), + num_channels=(44, 88, 176), + fuse_method='SUM' + ), + stage4=dict( + num_modules=3, + num_branches=4, + block_type='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(44, 88, 176, 352), + fuse_method='SUM', + ), + ), + + hrnet_w48=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(4, 4), + num_channels=(48, 96), + fuse_method='SUM' + ), + stage3=dict( + num_modules=4, + num_branches=3, + block_type='BASIC', + num_blocks=(4, 4, 4), + num_channels=(48, 96, 192), + fuse_method='SUM' + ), + stage4=dict( + num_modules=3, + num_branches=4, + block_type='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(48, 96, 192, 384), + fuse_method='SUM', + ), + ), + + hrnet_w64=dict( + stem_width=64, + stage1=dict( + num_modules=1, + num_branches=1, + block_type='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,), + fuse_method='SUM', + ), + stage2=dict( + num_modules=1, + num_branches=2, + block_type='BASIC', + num_blocks=(4, 4), + num_channels=(64, 128), + fuse_method='SUM' + ), + stage3=dict( + num_modules=4, + num_branches=3, + block_type='BASIC', + 
num_blocks=(4, 4, 4), + num_channels=(64, 128, 256), + fuse_method='SUM' + ), + stage4=dict( + num_modules=3, + num_branches=4, + block_type='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(64, 128, 256, 512), + fuse_method='SUM', + ), + ) +) + + +class HighResolutionModule(nn.Module): + def __init__( + self, + num_branches, + block_types, + num_blocks, + num_in_chs, + num_channels, + fuse_method, + multi_scale_output=True, + ): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, + block_types, + num_blocks, + num_in_chs, + num_channels, + ) + + self.num_in_chs = num_in_chs + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, + block_types, + num_blocks, + num_channels, + ) + self.fuse_layers = self._make_fuse_layers() + self.fuse_act = nn.ReLU(False) + + def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels): + error_msg = '' + if num_branches != len(num_blocks): + error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks)) + elif num_branches != len(num_channels): + error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels)) + elif num_branches != len(num_in_chs): + error_msg = 'num_branches({}) <> num_in_chs({})'.format(num_branches, len(num_in_chs)) + if error_msg: + _logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1): + downsample = None + if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)] + self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block_type, num_blocks, num_channels): + branches = [] + for i in range(num_branches): + branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return nn.Identity() + + num_branches = self.num_branches + num_in_chs = self.num_in_chs + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False), + nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(nn.Identity()) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_out_chs_conv3x3 = num_in_chs[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM) + )) + else: + num_out_chs_conv3x3 = num_in_chs[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_in_chs[j], 
num_out_chs_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM), + nn.ReLU(False) + )) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_in_chs(self): + return self.num_in_chs + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i, branch in enumerate(self.branches): + x[i] = branch(x[i]) + + x_fuse = [] + for i, fuse_outer in enumerate(self.fuse_layers): + y = None + for j, f in enumerate(fuse_outer): + if y is None: + y = f(x[j]) + else: + y = y + f(x[j]) + x_fuse.append(self.fuse_act(y)) + return x_fuse + + +class SequentialList(nn.Sequential): + + def __init__(self, *args): + super(SequentialList, self).__init__(*args) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (List[torch.Tensor]) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (List[torch.Tensor]) + pass + + def forward(self, x) -> List[torch.Tensor]: + for module in self: + x = module(x) + return x + + +@torch.jit.interface +class ModuleInterface(torch.nn.Module): + def forward(self, input: torch.Tensor) -> torch.Tensor: # `input` has a same name in Sequential forward + pass + + +block_types_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__( + self, + cfg, + in_chans=3, + num_classes=1000, + output_stride=32, + global_pool='avg', + drop_rate=0.0, + head='classification', + **kwargs, + ): + super(HighResolutionNet, self).__init__() + self.num_classes = num_classes + assert output_stride == 32 # FIXME support dilation + + cfg.update(**kwargs) + stem_width = cfg['stem_width'] + self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) + self.act1 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) + self.act2 = nn.ReLU(inplace=True) + + self.stage1_cfg = cfg['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = block_types_dict[self.stage1_cfg['block_type']] + num_blocks = self.stage1_cfg['num_blocks'][0] + self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks) + stage1_out_channel = block_type.expansion * num_channels + + self.stage2_cfg = cfg['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = block_types_dict[self.stage2_cfg['block_type']] + num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = block_types_dict[self.stage3_cfg['block_type']] + num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = block_types_dict[self.stage4_cfg['block_type']] + num_channels = 
[num_channels[i] * block_type.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) + + self.head = head + self.head_channels = None # set if _make_head called + head_conv_bias = cfg.pop('head_conv_bias', True) + if head == 'classification': + # Classification Head + self.num_features = self.head_hidden_size = 2048 + self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head( + pre_stage_channels, + conv_bias=head_conv_bias, + ) + self.global_pool, self.head_drop, self.classifier = create_classifier( + self.num_features, + self.num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + else: + if head == 'incre': + self.num_features = self.head_hidden_size = 2048 + self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True) + else: + self.num_features = self.head_hidden_size = 256 + self.incre_modules = None + self.global_pool = nn.Identity() + self.head_drop = nn.Identity() + self.classifier = nn.Identity() + + curr_stride = 2 + # module names aren't actually valid here, hook or FeatureNet based extraction would not work + self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] + for i, c in enumerate(self.head_channels if self.head_channels else num_channels): + curr_stride *= 2 + c = c * 4 if self.head_channels else c # head block_type expansion factor of 4 + self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] + + self.init_weights() + + def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True): + head_block_type = Bottleneck + self.head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = [] + for i, channels in enumerate(pre_stage_channels): + incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1)) + incre_modules = nn.ModuleList(incre_modules) + if incre_only: + return incre_modules, None, None + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = self.head_channels[i] * head_block_type.expansion + out_channels = self.head_channels[i + 1] * head_block_type.expansion + downsamp_module = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, out_channels=out_channels, + kernel_size=3, stride=2, padding=1, bias=conv_bias), + nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d( + in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features, + kernel_size=1, stride=1, padding=0, bias=conv_bias), + nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + return incre_modules, downsamp_modules, final_layer + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), + 
nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + _in_chs = num_channels_pre_layer[-1] + _out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs + conv3x3s.append(nn.Sequential( + nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False), + nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block_type, inplanes, planes, block_types, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block_type.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block_type(inplanes, planes, stride, downsample)] + inplanes = planes * block_type.expansion + for i in range(1, block_types): + layers.append(block_type(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True): + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block_type = block_types_dict[layer_config['block_type']] + fuse_method = layer_config['fuse_method'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + reset_multi_scale_output = multi_scale_output or i < num_modules - 1 + modules.append(HighResolutionModule( + num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output) + ) + num_in_chs = modules[-1].get_num_in_chs() + + return SequentialList(*modules), num_in_chs + + @torch.jit.ignore + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv[12]|bn[12]', + block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [ + (r'^layer(\d+)\.(\d+)', None), + (r'^stage(\d+)\.(\d+)', None), + (r'^transition(\d+)', (99999,)), + ], + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, "gradient checkpointing not supported" + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classifier + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def stages(self, x) -> List[torch.Tensor]: + x = self.layer1(x) + + xl = [t(x) for i, t in enumerate(self.transition1)] + yl = self.stage2(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] + yl = self.stage3(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] + yl = self.stage4(xl) + return yl + + def forward_features(self, x): + # Stem + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + 
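+        # the two stride-2 3x3 convs above reduce spatial resolution by 4x before the multi-branch stages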
+ # Stages + yl = self.stages(x) + if self.incre_modules is None or self.downsamp_modules is None: + return yl + + y = None + for i, incre in enumerate(self.incre_modules): + if y is None: + y = incre(yl[i]) + else: + down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing + y = incre(yl[i]) + down.forward(y) + + y = self.final_layer(y) + return y + + def forward_head(self, x, pre_logits: bool = False): + # Classification Head + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.classifier(x) + + def forward(self, x): + y = self.forward_features(x) + x = self.forward_head(y) + return x + + +class HighResolutionNetFeatures(HighResolutionNet): + """HighResolutionNet feature extraction + + The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. + It would be more complicated to use the FeatureNet helpers. + + The `feature_location=incre` allows grabbing increased channel count features using part of the + classification head. If `feature_location=''` the default HRNet features are returned. First stem + conv is used for stride 2 features. + """ + + def __init__( + self, + cfg, + in_chans=3, + num_classes=1000, + output_stride=32, + global_pool='avg', + drop_rate=0.0, + feature_location='incre', + out_indices=(0, 1, 2, 3, 4), + **kwargs, + ): + assert feature_location in ('incre', '') + super(HighResolutionNetFeatures, self).__init__( + cfg, + in_chans=in_chans, + num_classes=num_classes, + output_stride=output_stride, + global_pool=global_pool, + drop_rate=drop_rate, + head=feature_location, + **kwargs, + ) + self.feature_info = FeatureInfo(self.feature_info, out_indices) + self._out_idx = {f['index'] for f in self.feature_info.get_dicts()} + + def forward_features(self, x): + assert False, 'Not supported' + + def forward(self, x) -> List[torch.Tensor]: + out = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + if 0 in self._out_idx: + out.append(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + x = self.stages(x) + if self.incre_modules is not None: + x = [incre(f) for f, incre in zip(x, self.incre_modules)] + for i, f in enumerate(x): + if i + 1 in self._out_idx: + out.append(f) + return out + + +def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs): + model_cls = HighResolutionNet + features_only = False + kwargs_filter = None + if model_kwargs.pop('features_only', False): + model_cls = HighResolutionNetFeatures + kwargs_filter = ('num_classes', 'global_pool') + features_only = True + cfg_variant = cfg_variant or variant + + pretrained_strict = model_kwargs.pop( + 'pretrained_strict', + not features_only and model_kwargs.get('head', 'classification') == 'classification' + ) + model = build_model_with_cfg( + model_cls, + variant, + pretrained, + model_cfg=cfg_cls[cfg_variant], + pretrained_strict=pretrained_strict, + kwargs_filter=kwargs_filter, + **model_kwargs, + ) + if features_only: + model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg) + model.default_cfg = model.pretrained_cfg # backwards compat + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'hrnet_w18_small.gluon_in1k': 
_cfg(hf_hub_id='timm/', interpolation='bicubic'), + 'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), + 'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w18.ms_aug_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, + ), + 'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'), + 'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'), + + 'hrnet_w18_ssld.paddle_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) + ), + 'hrnet_w48_ssld.paddle_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) + ), +}) + + +@register_model +def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) + + +@register_model +def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) + + +@register_model +def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w18', pretrained, **kwargs) + + +@register_model +def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w30', pretrained, **kwargs) + + +@register_model +def hrnet_w32(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w32', pretrained, **kwargs) + + +@register_model +def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w40', pretrained, **kwargs) + + +@register_model +def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w44', pretrained, **kwargs) + + +@register_model +def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w48', pretrained, **kwargs) + + +@register_model +def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet: + return _create_hrnet('hrnet_w64', pretrained, **kwargs) + + +@register_model +def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet: + kwargs.setdefault('head_conv_bias', False) + return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs) + + +@register_model +def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet: + kwargs.setdefault('head_conv_bias', False) + return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs) +