""" CLIP Model

Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from .transformer import LayerNormFp32, LayerNorm, QuickGELU, VisionTransformer, TextTransformer


@dataclass
class CLIPVisionCfg:
    layers: Union[Tuple[int, int, int, int], int] = 12
    width: int = 768
    head_width: int = 64
    mlp_ratio: float = 4.0
    patch_size: int = 16
    image_size: Union[Tuple[int, int], int] = 224

    ls_init_value: Optional[float] = None  # layer scale initial value
    patch_dropout: float = 0.  # fraction of patches to drop during training (0. disables patch dropout)
    input_patchnorm: bool = False
    global_average_pool: bool = False
    attentional_pool: bool = False
    n_queries: int = 256
    attn_pooler_heads: int = 8
    output_tokens: bool = False

    # timm-specific options; not referenced by _build_vision_tower in this file
    timm_model_name: Optional[str] = None
    timm_model_pretrained: bool = False
    timm_pool: str = 'avg'
    timm_proj: str = 'linear'
    timm_proj_bias: bool = False
    timm_drop: float = 0.
    timm_drop_path: Optional[float] = None


@dataclass
class CLIPTextCfg:
    context_length: int = 77
    vocab_size: int = 49408
    width: int = 512
    heads: int = 8
    layers: int = 12
    ls_init_value: Optional[float] = None  # layer scale initial value
    # HuggingFace text-tower options; not referenced by _build_text_tower in this file
    hf_model_name: Optional[str] = None
    hf_tokenizer_name: Optional[str] = None
    hf_model_pretrained: bool = True
    proj: str = 'mlp'
    pooler_type: str = 'mean_pooler'
    embed_cls: bool = False
    pad_id: int = 0
    output_tokens: bool = False


def get_cast_dtype(precision: str):
    cast_dtype = None
    if precision == 'bf16':
        cast_dtype = torch.bfloat16
    elif precision == 'fp16':
        cast_dtype = torch.float16
    return cast_dtype
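
# Usage sketch for get_cast_dtype (illustrative; the function is not called elsewhere in
# this file, it simply maps a precision string to an optional torch dtype):
#
#   get_cast_dtype('bf16')  # -> torch.bfloat16
#   get_cast_dtype('fp16')  # -> torch.float16
#   get_cast_dtype('fp32')  # -> None (no explicit cast requested)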


def _build_vision_tower(
        embed_dim: int,
        vision_cfg: CLIPVisionCfg,
        quick_gelu: bool = False,
        cast_dtype: Optional[torch.dtype] = None
):
    if isinstance(vision_cfg, dict):
        vision_cfg = CLIPVisionCfg(**vision_cfg)

    # QuickGELU matches the activation used by the original OpenAI CLIP weights; nn.GELU otherwise
    act_layer = QuickGELU if quick_gelu else nn.GELU

    vision_heads = vision_cfg.width // vision_cfg.head_width
    # LayerNormFp32 keeps normalization in fp32 when the tower itself is cast to fp16/bf16
    norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
    visual = VisionTransformer(
        image_size=vision_cfg.image_size,
        patch_size=vision_cfg.patch_size,
        width=vision_cfg.width,
        layers=vision_cfg.layers,
        heads=vision_heads,
        mlp_ratio=vision_cfg.mlp_ratio,
        ls_init_value=vision_cfg.ls_init_value,
        patch_dropout=vision_cfg.patch_dropout,
        input_patchnorm=vision_cfg.input_patchnorm,
        global_average_pool=vision_cfg.global_average_pool,
        attentional_pool=vision_cfg.attentional_pool,
        n_queries=vision_cfg.n_queries,
        attn_pooler_heads=vision_cfg.attn_pooler_heads,
        output_tokens=vision_cfg.output_tokens,
        output_dim=embed_dim,
        act_layer=act_layer,
        norm_layer=norm_layer,
    )

    return visual
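
# Example (sketch): with the CLIPVisionCfg defaults above (width=768, head_width=64,
# layers=12, patch_size=16, image_size=224) this yields a ViT-B/16-style tower with
# 768 // 64 = 12 attention heads over a 224 // 16 = 14 x 14 patch grid. The embed_dim
# value below is illustrative, not a reference setting:
#
#   visual = _build_vision_tower(embed_dim=512, vision_cfg=CLIPVisionCfg())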


def _build_text_tower(
        embed_dim: int,
        text_cfg: CLIPTextCfg,
        quick_gelu: bool = False,
        cast_dtype: Optional[torch.dtype] = None,
):
    if isinstance(text_cfg, dict):
        text_cfg = CLIPTextCfg(**text_cfg)

    act_layer = QuickGELU if quick_gelu else nn.GELU
    norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm

    text = TextTransformer(
        context_length=text_cfg.context_length,
        vocab_size=text_cfg.vocab_size,
        width=text_cfg.width,
        heads=text_cfg.heads,
        layers=text_cfg.layers,
        ls_init_value=text_cfg.ls_init_value,
        output_dim=embed_dim,
        embed_cls=text_cfg.embed_cls,
        output_tokens=text_cfg.output_tokens,
        pad_id=text_cfg.pad_id,
        act_layer=act_layer,
        norm_layer=norm_layer,
    )
    return text
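
# Example (sketch): as with the vision tower, a plain dict (e.g. one parsed from a JSON
# model config) is accepted in place of a CLIPTextCfg; the values below are illustrative:
#
#   text = _build_text_tower(embed_dim=512, text_cfg={'width': 512, 'heads': 8, 'layers': 12})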


class CLIP(nn.Module):
    output_dict: torch.jit.Final[bool]

    def __init__(
            self,
            embed_dim: int,
            vision_cfg: CLIPVisionCfg,
            text_cfg: CLIPTextCfg,
            quick_gelu: bool = False,
            cast_dtype: Optional[torch.dtype] = None,
            output_dict: bool = False,
    ):
        super().__init__()
        self.output_dict = output_dict
        self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)

        text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
        self.transformer = text.transformer
        self.context_length = text.context_length
        self.vocab_size = text.vocab_size
        self.token_embedding = text.token_embedding
        self.positional_embedding = text.positional_embedding
        self.ln_final = text.ln_final
        self.text_projection = text.text_projection
        self.register_buffer('attn_mask', text.attn_mask, persistent=False)

        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
        # freeze the image tower; see self.visual.lock for the unlocked_groups / freeze_bn_stats semantics
        self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.visual.set_grad_checkpointing(enable)
        self.transformer.grad_checkpointing = enable

    def encode_image(self, image, normalize: bool = False):
        features = self.visual(image)
        return F.normalize(features, dim=-1) if normalize else features

    def encode_text(self, text, normalize: bool = False):
        cast_dtype = self.transformer.get_cast_dtype()

        x = self.token_embedding(text).to(cast_dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.to(cast_dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, attn_mask=self.attn_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)

        # take features from the eot embedding (the eot token is the highest id in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return F.normalize(x, dim=-1) if normalize else x

    def forward(
            self,
            image: Optional[torch.Tensor] = None,
            text: Optional[torch.Tensor] = None,
    ):
        image_features = self.encode_image(image, normalize=True) if image is not None else None
        text_features = self.encode_text(text, normalize=True) if text is not None else None
        if self.output_dict:
            return {
                "image_features": image_features,
                "text_features": text_features,
                "logit_scale": self.logit_scale.exp()
            }
        return image_features, text_features, self.logit_scale.exp()
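
# Minimal end-to-end sketch (illustrative shapes and values, not a reference config):
#
#   model = CLIP(
#       embed_dim=512,
#       vision_cfg=CLIPVisionCfg(),  # ViT-B/16-style defaults from above
#       text_cfg=CLIPTextCfg(),      # 77-token context, 49408-token vocab defaults
#   )
#   image = torch.randn(2, 3, 224, 224)
#   text = torch.randint(0, 49408, (2, 77))  # stands in for real tokenizer output
#   image_features, text_features, logit_scale = model(image, text)
#   logits_per_image = logit_scale * image_features @ text_features.T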