# baselines-v2/cde_benchmark/embedders/jina_late_chunking_embedder.py
import numpy as np
import torch
from tqdm import tqdm
from sentence_transformers import SentenceTransformer

from cde_benchmark.embedders.base_embedder import Embedder


class LateChunkingEmbedder(Embedder):
    """Late-chunking embedder: each document is encoded in a single forward pass,
    and per-chunk embeddings are obtained by mean-pooling the contextualised token
    embeddings that fall between consecutive [SEP] markers."""

    def __init__(
        self,
        model: SentenceTransformer = None,
        batch_size: int = 16,
        show_progress_bar: bool = True,
    ):
        super().__init__(is_contextual_model=True)
        self.model: SentenceTransformer = model
        self.show_progress_bar = show_progress_bar
        self.batch_size = batch_size
        self.sep_token = self.model.tokenizer.sep_token

    def embed_queries(self, queries):
        return self.model.encode(
            queries,
            show_progress_bar=self.show_progress_bar,
            batch_size=self.batch_size,
        )

    def embed_documents(self, documents):
        # `documents` is a list of documents, each given as a list of chunk strings.
        # Chunks are joined with [SEP] so the whole document is encoded in one forward
        # pass and each chunk embedding is contextualised by the surrounding chunks.
        embeddings = []
        for document in tqdm(documents):
            doc = self.sep_token + f"{self.sep_token}".join(document)
            encodings = self.model.tokenizer(
                [doc],
                max_length=8192,
                truncation=True,
                padding=True,
                return_tensors="pt",
            ).to(self.model.device)
            # split the model outputs on the [SEP] token
            sep_indices = (
                encodings["input_ids"] == self.model.tokenizer.sep_token_id
            ).nonzero(as_tuple=True)[1]
            # assert sep_token is at the end
            assert (sep_indices[-1] == encodings.input_ids.shape[1] - 1).item()
            if len(document) != len(sep_indices) - 1:
                print(f"Warning: number of chunks ({len(document)}) does not match number of [SEP] tokens - 1 ({len(sep_indices) - 1}), indicating the document was too long and was truncated")
                print(f"The document was {len(doc)} characters and {len(encodings.input_ids[0])} tokens, while the model max_length is 8192")
                breakpoint()
            # single forward pass over the full document; token embeddings are shared
            # across chunks (late chunking); no_grad avoids building the autograd graph
            with torch.no_grad():
                model_outputs = (
                    self.model._modules["0"].auto_model(**encodings).last_hidden_state
                )
            tmp_embeddings = []
            for i in range(len(sep_indices) - 1):
                # mean-pool the token embeddings between consecutive [SEP] tokens
                tmp_embeddings.append(
                    model_outputs[
                        0,
                        sep_indices[i] + 1 : sep_indices[i + 1],
                        :,
                    ]
                    .mean(dim=0)
                    .detach()
                    .cpu()
                    .numpy()
                )
            # stack chunk embeddings into a (num_chunks, hidden_dim) array
            tmp_embeddings = np.array(tmp_embeddings)
            # L2-normalize each chunk embedding
            tmp_embeddings = (
                tmp_embeddings / np.linalg.norm(tmp_embeddings, axis=1)[:, None]
            )
            embeddings.append(tmp_embeddings)
        return embeddings
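

# Minimal usage sketch: assumes a long-context SentenceTransformer whose tokenizer
# defines a [SEP] token. The checkpoint "jinaai/jina-embeddings-v2-base-en" is an
# illustrative assumption, not necessarily the model used by the benchmark.
if __name__ == "__main__":
    model = SentenceTransformer(
        "jinaai/jina-embeddings-v2-base-en", trust_remote_code=True
    )
    embedder = LateChunkingEmbedder(model=model, batch_size=8)

    # One document given as a list of chunks; embed_documents returns one
    # (num_chunks, hidden_dim) array per document, with L2-normalized rows.
    chunk_embeddings = embedder.embed_documents(
        [
            [
                "Late chunking encodes the full document in a single forward pass.",
                "Each chunk is then mean-pooled from the shared token embeddings.",
            ]
        ]
    )[0]

    # Score chunks against a query; chunk embeddings are already normalized, so the
    # dot product with a normalized query embedding is cosine similarity.
    query_embedding = embedder.embed_queries(["what is late chunking?"])[0]
    query_embedding = query_embedding / np.linalg.norm(query_embedding)
    print(chunk_embeddings @ query_embedding)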