from typing import Dict

import torch

from cde_benchmark.formatters.data_formatter import BaseDataFormatter
from cde_benchmark.evaluators.eval_utils import CustomRetrievalEvaluator


class Embedder:
    """Base class for retrieval embedders.

    Subclasses implement ``embed_queries`` and ``embed_documents``; this class
    pulls data from a formatter, scores queries against documents, and
    computes retrieval metrics.
    """

    def __init__(
        self,
        is_contextual_model: bool = False,
    ):
        # Contextual models embed documents nested per context; their outputs
        # are flattened again in process_documents.
        self.is_contextual_model = is_contextual_model
        self.evaluator = CustomRetrievalEvaluator()

    def embed_queries(self, queries):
        """Embed a list of queries. Must be implemented by subclasses."""
        raise NotImplementedError

    def embed_documents(self, documents):
        """Embed a list of documents. Must be implemented by subclasses."""
        raise NotImplementedError

    def process_queries(self, data_formatter: BaseDataFormatter):
        # Each query is paired with the id of its gold (relevant) document.
        queries, document_ids = data_formatter.get_queries()
        query_embeddings = self.embed_queries(queries)
        return query_embeddings, document_ids

    def process_documents(self, data_formatter: BaseDataFormatter):
        if self.is_contextual_model:
            # Contextual models consume documents grouped by context, so both
            # the ids and the returned embeddings are nested and must be
            # flattened before scoring.
            documents, document_ids = data_formatter.get_nested()
            doc_embeddings = self.embed_documents(documents)

            document_ids = [id_ for nested_ids in document_ids for id_ in nested_ids]
            doc_embeddings = [
                embed_ for nested_embeds in doc_embeddings for embed_ in nested_embeds
            ]
        else:
            documents, document_ids = data_formatter.get_flattened()
            doc_embeddings = self.embed_documents(documents)

        return doc_embeddings, document_ids

    def get_similarities(self, query_embeddings, doc_embeddings):
        # Dot-product similarity between every query and every document,
        # giving a (num_queries, num_documents) score matrix.
        query_embeddings = torch.tensor(query_embeddings)
        doc_embeddings = torch.tensor(doc_embeddings)
        scores = torch.mm(query_embeddings, doc_embeddings.t())
        return scores

    def get_metrics(self, scores, all_document_ids, label_documents_id):
        # One column per scored document, one row (and one gold label) per
        # query, and every label must be among the scored documents.
        assert scores.shape[1] == len(all_document_ids)
        assert scores.shape[0] == len(label_documents_id)
        assert set(label_documents_id).issubset(set(all_document_ids))

        # qrels: each query, keyed by its index, has exactly one relevant
        # document. Ids are stringified to match the keys used in results.
        relevant_docs = {}
        for idx, label in enumerate(label_documents_id):
            relevant_docs[str(idx)] = {str(label): 1}

        # Run results: for each query, map every document id to its score.
        results = {}
        for idx, scores_per_query in enumerate(scores):
            results[str(idx)] = {
                str(doc_id): score.item()
                for doc_id, score in zip(all_document_ids, scores_per_query)
            }

        metrics: Dict[str, float] = self.evaluator.compute_mteb_metrics(
            relevant_docs, results
        )
        return metrics

    def compute_metrics_e2e(self, data_formatter: BaseDataFormatter):
        # End-to-end evaluation: embed queries and documents, score all
        # pairs, then compute retrieval metrics against the gold labels.
        queries_embeddings, label_ids = self.process_queries(data_formatter)
        documents_embeddings, all_doc_ids = self.process_documents(data_formatter)

        scores = self.get_similarities(queries_embeddings, documents_embeddings)
        metrics = self.get_metrics(scores, all_doc_ids, label_ids)
        return metrics
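

# A minimal sketch of a concrete subclass, for illustration only: it returns
# seeded random vectors in place of a real encoder, just to show the contract
# Embedder expects. Everything below (class name, dimension, the demo ids) is
# made up for the example and not part of cde_benchmark.
class _RandomEmbedder(Embedder):
    def __init__(self, dim: int = 16):
        super().__init__(is_contextual_model=False)
        self.dim = dim
        self._generator = torch.Generator().manual_seed(0)

    def embed_queries(self, queries):
        return torch.randn(len(queries), self.dim, generator=self._generator)

    def embed_documents(self, documents):
        return torch.randn(len(documents), self.dim, generator=self._generator)


if __name__ == "__main__":
    # Smoke test with made-up data: two queries whose gold documents are
    # "d0" and "d2", scored against three documents.
    embedder = _RandomEmbedder()
    query_embs = embedder.embed_queries(["first query", "second query"])
    doc_embs = embedder.embed_documents(["doc a", "doc b", "doc c"])
    scores = embedder.get_similarities(query_embs, doc_embs)
    print(embedder.get_metrics(scores, ["d0", "d1", "d2"], ["d0", "d2"]))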