import math
import re

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPModel

import open_clip

from ldm.util import count_params


class AbstractEncoder(nn.Module):
    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError


class IdentityEncoder(AbstractEncoder):
    def encode(self, x):
        return x


class ClassEmbedder(nn.Module):
    def __init__(self, embed_dim, n_classes=1000, key='class'):
        super().__init__()
        self.key = key
        self.embedding = nn.Embedding(n_classes, embed_dim)

    def forward(self, batch, key=None):
        if key is None:
            key = self.key
        # add a trailing sequence dimension so the result can be used for cross-attention
        c = batch[key][:, None]
        c = self.embedding(c)
        return c


class FrozenT5Embedder(AbstractEncoder):
    """Uses the T5 transformer encoder for text."""

    def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True):
        super().__init__()
        self.tokenizer = T5Tokenizer.from_pretrained(version)
        self.transformer = T5EncoderModel.from_pretrained(version)
        self.device = device
        self.max_length = max_length
        if freeze:
            self.freeze()

    def freeze(self):
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)
        outputs = self.transformer(input_ids=tokens)
        z = outputs.last_hidden_state
        return z

    def encode(self, text):
        return self(text)


class FrozenCLIPEmbedder(AbstractEncoder):
    """Uses the CLIP transformer encoder for text (from huggingface)."""

    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77,
                 freeze=True, layer="last"):
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPModel.from_pretrained(version).text_model
        self.device = device
        self.max_length = max_length
        if freeze:
            self.freeze()
        self.layer = layer

    def freeze(self):
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)
        outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer != 'last')

        if self.layer == 'penultimate':
            z = outputs.hidden_states[-2]
            z = self.transformer.final_layer_norm(z)
        else:
            z = outputs.last_hidden_state
        return z

    def encode(self, text):
        return self(text)
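

# A minimal usage sketch for FrozenCLIPEmbedder (an assumption, not part of the
# original module). It presumes a CUDA device and that the pretrained weights
# can be fetched from the HuggingFace hub; the shape reflects the default
# max_length=77 and the 768-wide hidden state of clip-vit-large-patch14:
#
#     encoder = FrozenCLIPEmbedder().to("cuda")
#     z = encoder.encode(["a photograph of an astronaut riding a horse"])
#     print(z.shape)  # torch.Size([1, 77, 768])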


class FrozenOpenCLIPEmbedder(AbstractEncoder):
    """Uses the OpenCLIP transformer encoder for text."""

    LAYERS = [
        "last",
        "penultimate"
    ]

    def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
                 freeze=True, layer="last"):
        super().__init__()
        assert layer in self.LAYERS
        model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
        # only the text tower is needed, so drop the visual tower to save memory
        del model.visual
        self.model = model

        self.device = device
        self.max_length = max_length
        if freeze:
            self.freeze()
        self.layer = layer
        if self.layer == "last":
            self.layer_idx = 0
        elif self.layer == "penultimate":
            self.layer_idx = 1
        else:
            raise NotImplementedError()

    def freeze(self):
        self.model = self.model.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        tokens = open_clip.tokenize(text)
        z = self.encode_with_transformer(tokens.to(self.device))
        return z

    def encode_with_transformer(self, text):
        x = self.model.token_embedding(text)  # [batch_size, n_ctx, d_model]
        x = x + self.model.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.model.ln_final(x)
        return x

    def text_transformer_forward(self, x: torch.Tensor, attn_mask=None):
        # run every residual block except the last `layer_idx` of them
        for i, r in enumerate(self.model.transformer.resblocks):
            if i == len(self.model.transformer.resblocks) - self.layer_idx:
                break
            if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(r, x, attn_mask)
            else:
                x = r(x, attn_mask=attn_mask)
        return x

    def encode(self, text):
        return self(text)
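

# A minimal usage sketch for FrozenOpenCLIPEmbedder (an assumption, not part of
# the original module). layer="penultimate" skips the final transformer block,
# which is the configuration used by SD 2.x-style models; weights are fetched
# by open_clip under the "laion2b_s32b_b79k" tag:
#
#     encoder = FrozenOpenCLIPEmbedder(layer="penultimate").to("cuda")
#     z = encoder.encode(["a painting of a fox in the snow"])
#     print(z.shape)  # torch.Size([1, 77, 1024]) for ViT-H-14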


class FrozenCLIPT5Encoder(AbstractEncoder):
    def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda",
                 clip_max_length=77, t5_max_length=77):
        super().__init__()
        self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length)
        self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length)
        print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder) * 1.e-6:.2f} M parameters, "
              f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder) * 1.e-6:.2f} M params.")

    def encode(self, text):
        return self(text)

    def forward(self, text):
        # the two embeddings have different widths, so they are returned as a list
        clip_z = self.clip_encoder.encode(text)
        t5_z = self.t5_encoder.encode(text)
        return [clip_z, t5_z]


re_attention = re.compile(r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""", re.X)
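
# For illustration (not part of the original module): the alternatives above
# match, in order, escaped brackets and backslashes, opening brackets, a
# ':<number>)' weight suffix, closing brackets, runs of plain text, and a
# lone ':'. For example,
#     [m.group(0) for m in re_attention.finditer('a (cat:1.2) [dog]')]
# yields ['a ', '(', 'cat', ':1.2)', ' ', '[', 'dog', ']'].

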
def parse_prompt_attention(text):
    r"""
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text

    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith('\\'):
            res.append([text[1:], 1.0])
        elif text == '(':
            round_brackets.append(len(res))
        elif text == '[':
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ')' and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == ']' and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    # apply the default multipliers to any brackets left unclosed
    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge adjacent runs that ended up with the same weight
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res
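

# A worked note on the last doctest above (illustration only): in
# 'a (((house:1.3)) ...', three '(' open but only two are closed during the
# scan; ':1.3)' closes the innermost at 1.3 and the single remaining ')'
# closes the next at 1.1, so the unclosed-bracket cleanup applies one more
# 1.1 from 'house' to the end of the prompt. 'house' therefore ends up at
# 1.3 * 1.1 * 1.1 = 1.573, and 'sky' at 1.1**4 = 1.4641.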


class WebUIFrozenCLIPEmbedder(AbstractEncoder):
    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", freeze=True, layer="penultimate"):
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPModel.from_pretrained(version).text_model
        self.device = device
        self.layer = layer
        if freeze:
            self.freeze()

        self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ',</w>'][0]
        self.comma_padding_backtrack = 20

    def freeze(self):
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def tokenize(self, texts):
        tokenized = self.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]
        return tokenized

    def encode_with_transformers(self, tokens):
        outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer != 'last')

        if self.layer == 'penultimate':
            z = outputs.hidden_states[-2]
            z = self.transformer.final_layer_norm(z)
        else:
            z = outputs.last_hidden_state

        return z

    def tokenize_line(self, line):
        parsed = parse_prompt_attention(line)

        tokenized = self.tokenize([text for text, _ in parsed])

        remade_tokens = []
        multipliers = []
        last_comma = -1

        for tokens, (text, weight) in zip(tokenized, parsed):
            i = 0
            while i < len(tokens):
                token = tokens[i]

                if token == self.comma_token:
                    last_comma = len(remade_tokens)
                # if a chunk boundary falls shortly after a comma, move the text
                # after that comma into the next 75-token chunk instead of splitting it
                elif (self.comma_padding_backtrack != 0
                      and max(len(remade_tokens), 1) % 75 == 0
                      and last_comma != -1
                      and len(remade_tokens) - last_comma <= self.comma_padding_backtrack):
                    last_comma += 1
                    reloc_tokens = remade_tokens[last_comma:]
                    reloc_mults = multipliers[last_comma:]

                    remade_tokens = remade_tokens[:last_comma]
                    length = len(remade_tokens)

                    rem = int(math.ceil(length / 75)) * 75 - length
                    remade_tokens += [self.tokenizer.eos_token_id] * rem + reloc_tokens
                    multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults

                remade_tokens.append(token)
                multipliers.append(weight)
                i += 1

        # pad the prompt to a multiple of 75 tokens with EOS
        token_count = len(remade_tokens)
        prompt_target_length = math.ceil(max(token_count, 1) / 75) * 75
        tokens_to_add = prompt_target_length - len(remade_tokens)

        remade_tokens = remade_tokens + [self.tokenizer.eos_token_id] * tokens_to_add
        multipliers = multipliers + [1.0] * tokens_to_add

        return remade_tokens, multipliers, token_count
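
    # A worked illustration of the comma backtracking above (hypothetical
    # numbers): if a chunk fills up, i.e. the token count reaches a multiple
    # of 75, at most comma_padding_backtrack (20) tokens after the last comma,
    # everything after that comma is relocated to the start of the next
    # 75-token chunk and the first chunk is padded with EOS, so a clause is
    # not split across chunk boundaries.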

    def process_text(self, texts):
        remade_batch_tokens = []
        token_count = 0

        cache = {}
        batch_multipliers = []
        for line in texts:
            if line in cache:
                remade_tokens, multipliers = cache[line]
            else:
                remade_tokens, multipliers, current_token_count = self.tokenize_line(line)
                token_count = max(current_token_count, token_count)

                cache[line] = (remade_tokens, multipliers)

            remade_batch_tokens.append(remade_tokens)
            batch_multipliers.append(multipliers)

        return batch_multipliers, remade_batch_tokens, token_count

    def process_tokens(self, remade_batch_tokens, batch_multipliers):
        remade_batch_tokens = [[self.tokenizer.bos_token_id] + x[:75] + [self.tokenizer.eos_token_id] for x in remade_batch_tokens]
        batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]

        tokens = torch.asarray(remade_batch_tokens).to(self.device)

        z = self.encode_with_transformers(tokens)

        batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
        batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(self.device)
        # scale each token embedding by its weight, then restore the original
        # mean so the overall magnitude of the conditioning is unchanged
        original_mean = z.mean()
        z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
        new_mean = z.mean()
        z *= original_mean / new_mean

        return z

    def forward(self, text):
        batch_multipliers, remade_batch_tokens, token_count = self.process_text(text)

        # encode the prompt in chunks of 75 tokens until every entry is consumed
        z = None
        while max(map(len, remade_batch_tokens)) != 0:
            rem_tokens = [x[75:] for x in remade_batch_tokens]
            rem_multipliers = [x[75:] for x in batch_multipliers]

            tokens = []
            multipliers = []
            for j in range(len(remade_batch_tokens)):
                if len(remade_batch_tokens[j]) > 0:
                    tokens.append(remade_batch_tokens[j][:75])
                    multipliers.append(batch_multipliers[j][:75])
                else:
                    tokens.append([self.tokenizer.eos_token_id] * 75)
                    multipliers.append([1.0] * 75)

            z1 = self.process_tokens(tokens, multipliers)
            z = z1 if z is None else torch.cat((z, z1), dim=-2)

            remade_batch_tokens = rem_tokens
            batch_multipliers = rem_multipliers

        return z

    def encode(self, text):
        return self(text)
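

# A minimal usage sketch for WebUIFrozenCLIPEmbedder (an assumption, not part
# of the original module). Prompts longer than 75 tokens are encoded in
# 77-token chunks (75 tokens plus BOS/EOS) concatenated along the sequence
# dimension, so z.shape[1] is a multiple of 77:
#
#     encoder = WebUIFrozenCLIPEmbedder().to("cuda")
#     z = encoder.encode(["a (red:1.3) ball on [green] grass"])
#     print(z.shape)  # torch.Size([1, 77, 768]) for a short prompt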


if __name__ == "__main__":
    model = FrozenCLIPEmbedder()
    count_params(model, verbose=True)