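"""Generate dense passage embeddings for datastore shards.

Loads a retriever (contriever, dragon, sentence-transformers, or a vLLM
embedding model), encodes each passage shard, and pickles the resulting
(ids, embeddings) pairs to the configured embedding directory.
"""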

import argparse
import logging
import os
import pickle
from pathlib import Path
from typing import Any

import numpy as np
import torch
import transformers
from omegaconf import DictConfig
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer
from vllm import LLM

import contriever.src.contriever
import contriever.src.normalize_text
import contriever.src.slurm
import contriever.src.utils

from src.data import fast_load_jsonl_shard


def get_model(args: DictConfig):
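    """Load the retriever model and tokenizer named by args.model_name_or_path.

    Supports vLLM-served embedding models, contriever, dragon-style HF encoders,
    and sentence-transformers models; the tokenizer is None for the latter.
    """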
    model_name_or_path: str = args.model_name_or_path
    logging.info(f"Loading retriever model from {model_name_or_path}...")

    if args.get("use_vllm", False):
        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        # Pin vLLM to the xformers attention backend for the embedding task.
        os.environ["VLLM_ATTENTION_BACKEND"] = "XFORMERS"
        model = LLM(
            model=model_name_or_path,
            dtype="auto",
            task="embed",
            enforce_eager=True,
        )
        return model, tokenizer

    if "contriever" in model_name_or_path:
        model, tokenizer, _ = contriever.src.contriever.load_retriever(model_name_or_path)
        model = model.cuda()
        if not args.no_fp16:
            model = model.half()
        model.eval()
    elif "dragon" in model_name_or_path:
        tokenizer_name_or_path = (
            args.tokenizer if args.get("tokenizer", None) else model_name_or_path
        )
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path)
        model = AutoModel.from_pretrained(model_name_or_path)
        model = model.cuda()
        if not args.no_fp16:
            model = model.half()
        model.eval()
    elif "sentence-transformers" in model_name_or_path:
        # SentenceTransformer models handle tokenization internally.
        tokenizer = None
        model = SentenceTransformer(model_name_or_path)
        model.eval()
    else:
        raise ValueError(f"{model_name_or_path} is not supported!")

    return model, tokenizer


def generate_passage_embeddings(cfg: DictConfig):
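    """Embed every passage shard listed in cfg.datastore.embedding and pickle the results.

    Each shard is written to <embedding_dir>/<prefix>_<shard_id>.pkl (zero-padded id)
    as a (passage_ids, embeddings) tuple; shards with existing files are skipped.
    """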
    # Sparse retrieval (e.g. BM25) does not use dense embeddings, so skip this step.
    if "sparse_retriever" in cfg.model:
        print("No need to run the embedding step for sparse retrieval, skipping...")
        return

    args: DictConfig = cfg.datastore.embedding
    model, tokenizer = get_model(args)

    for shard_id in map(int, args.shard_ids):
        embedding_shard_save_path: Path = Path(args.embedding_dir) / (
            args.prefix + f"_{shard_id:02d}.pkl"
        )

        if args.get("use_saved_if_exists", True) and embedding_shard_save_path.exists():
            print(f"Embeddings exist in {embedding_shard_save_path}")
            continue

        shard_passages = fast_load_jsonl_shard(args, shard_id, return_all_passages=True)
        all_ids, all_embeddings = embed_passages(args, shard_passages, model, tokenizer)
        assert all_embeddings[0].shape == (cfg.datastore.index.projection_size,), (
            f"Embedding shape is {all_embeddings[0].shape}, "
            f"while the index requires {cfg.datastore.index.projection_size}"
        )

        Path(args.embedding_dir).mkdir(parents=True, exist_ok=True)
        print(f"Saving {len(all_ids)} passage embeddings to {embedding_shard_save_path}.")
        with open(embedding_shard_save_path, mode="wb") as file:
            pickle.dump((all_ids, all_embeddings), file)
        print(
            f"Processed {len(all_ids)} passages in the {shard_id}-th (out of {args.num_shards}) shard.\n"
            f"Written to {embedding_shard_save_path}."
        )


def embed_passages(
    args: DictConfig,
    passages: list[dict[str, Any]],
    model: Any,
    tokenizer: transformers.PreTrainedTokenizerBase | None,
) -> tuple[list[int], np.ndarray]:
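    """Encode preprocessed passages with the given retriever.

    Returns the passage ids alongside a (num_passages, embedding_dim) array of
    embeddings, regardless of which retriever backend produced them.
    """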

    def preprocess_text(p: dict[str, Any]) -> str:
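        """Build the text to embed from a passage dict, applying the configured preprocessing."""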
        if args.no_title or "title" not in p:
            text: str = p["text"]
        else:
            text = p["title"] + " " + p["text"]
        if args.lowercase:
            text = text.lower()
        if args.normalize_text:
            text = contriever.src.normalize_text.normalize(text)

        # GritLM expects an embedding-mode prefix before the passage text.
        if "GritLM" in args.model_name_or_path:
            text = "<|embed|>\n" + text

        return text

    all_ids: list[int] = []
    all_embeddings: list[np.ndarray] = []

    if "sentence-transformers" in args.model_name_or_path:
        all_texts: list[str] = []

        for passage in tqdm(passages):
            all_ids.append(passage["id"])
            all_texts.append(preprocess_text(passage))

        with torch.no_grad():
            # SentenceTransformer.encode returns a (num_passages, dim) numpy array.
            all_embeddings = model.encode(all_texts, batch_size=64)
    else:
        if args.get("use_vllm", False):
            BATCH_SIZE = args.per_gpu_batch_size
            for batch_idx in tqdm(range(0, len(passages), BATCH_SIZE)):
                batch = passages[batch_idx : batch_idx + BATCH_SIZE]
                batch_ids = [p["id"] for p in batch]
                batch_texts = [preprocess_text(p) for p in batch]

                outputs = model.embed(batch_texts)
                batch_embeddings = [output.outputs.embedding for output in outputs]

                # L2-normalize each embedding (inner product then equals cosine similarity).
                batch_embeddings = np.array(
                    [embedding / np.linalg.norm(embedding) for embedding in batch_embeddings]
                )
                all_ids.extend(batch_ids)
                all_embeddings.append(batch_embeddings)

            all_embeddings = np.concatenate(all_embeddings, axis=0)
        else:
            BATCH_SIZE = args.per_gpu_batch_size
            for batch_idx in tqdm(range(0, len(passages), BATCH_SIZE)):
                batch = passages[batch_idx : batch_idx + BATCH_SIZE]
                batch_ids = [p["id"] for p in batch]
                batch_texts = [preprocess_text(p) for p in batch]

                with torch.no_grad():
                    encoded_batch = tokenizer.batch_encode_plus(
                        batch_texts,
                        return_tensors="pt",
                        max_length=args.passage_maxlength,
                        padding=True,
                        truncation=True,
                    )
                    encoded_batch = {k: v.cuda() for k, v in encoded_batch.items()}
                    batch_embeddings = model(**encoded_batch)

                    # Contriever returns pooled embeddings directly; other HF encoders
                    # (e.g. dragon) take the [CLS] token of the last hidden state.
                    if "contriever" not in args.model_name_or_path:
                        batch_embeddings = batch_embeddings.last_hidden_state[:, 0, :]

                    batch_embeddings = batch_embeddings.cpu()

                all_ids.extend(batch_ids)
                all_embeddings.append(batch_embeddings)

            all_embeddings = torch.cat(all_embeddings, dim=0).numpy()

    return all_ids, all_embeddings


def get_sharded_passages(args, all_passages):
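    """Return the contiguous slice of all_passages assigned to args.shard_id of args.num_shards."""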
    total_num_passages = len(all_passages)
    shard_size = total_num_passages // args.num_shards
    start_idx = args.shard_id * shard_size
    end_idx = start_idx + shard_size
    # The last shard absorbs any remainder from the integer division.
    if args.shard_id == args.num_shards - 1:
        end_idx = total_num_passages

    passages = all_passages[start_idx:end_idx]
    print(f"Using {len(passages)} passages from idx {start_idx} to {end_idx}.")
    return passages


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--raw_data_path",
        type=str,
        default=None,
        help="Path to passages (.jsonl or .tsv file)",
    )
    parser.add_argument(
        "--embedding_dir",
        type=str,
        default="wikipedia_embeddings",
        help="dir path to save embeddings",
    )
    parser.add_argument(
        "--prefix", type=str, default="passages", help="prefix of the saved embedding files"
    )
    parser.add_argument(
        "--shard_id", type=int, default=0, help="Id of the current shard"
    )
    parser.add_argument(
        "--num_shards", type=int, default=1, help="Total number of shards"
    )
    parser.add_argument(
        "--per_gpu_batch_size",
        type=int,
        default=512,
        help="Batch size for the passage encoder forward pass",
    )
    parser.add_argument(
        "--chunk_size",
        type=int,
        default=512,
        help="Maximum number of words in a passage; the length is further truncated by passage_maxlength",
    )
    parser.add_argument(
        "--passage_maxlength",
        type=int,
        default=512,
        help="Maximum number of tokens in a passage",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="path to directory containing model weights and config file",
    )
    parser.add_argument("--no_fp16", action="store_true", help="inference in fp32")
    parser.add_argument(
        "--no_title", action="store_true", help="title not added to the passage body"
    )
    parser.add_argument(
        "--lowercase", action="store_true", help="lowercase text before encoding"
    )
    parser.add_argument(
        "--normalize_text", action="store_true", help="normalize text before encoding"
    )
    parser.add_argument(
        "--use_vllm", action="store_true", help="use vLLM for embedding"
    )
    args = parser.parse_args()

    generate_passage_embeddings(DictConfig(vars(args)))
|