|
import json |
|
import tqdm |
|
import torch |
|
import numpy |
|
import datasets |
|
import argparse |
|
import transformers |
|
from sentence_transformers import SentenceTransformer |
|
|
|
|
|
# Command-line interface for the negative-mining run.
#   --language : which language split of the MCQA dataset to load
#   --limit    : optional cap on the number of examples (debugging)
#   --model    : SentenceTransformer checkpoint used for embeddings
parser = argparse.ArgumentParser()
for flag, options in (
    ("--language", {}),
    ("--limit", {"type": int}),
    ("--model", {"default": "paraphrase-multilingual-mpnet-base-v2"}),
):
    parser.add_argument(flag, **options)
args = parser.parse_args()
|
|
|
|
|
# Inference only: disable autograd globally so encoding never tracks gradients.
torch.set_grad_enabled(False)
# Shared embedding model used by both encode() and retrieve() below.
model = SentenceTransformer(args.model)
|
|
|
|
|
def encode(batch):
    """Embed a batch of questions with the module-level SentenceTransformer.

    Intended for `datasets.Dataset.map(batched=True)`: `batch` is a dict of
    columns, and the returned dict adds an "embeddings" column.
    """
    questions = batch["question"]
    # Encode the whole incoming batch in a single forward pass.
    vectors = model.encode(questions, batch_size=len(questions))
    return {"embeddings": vectors}
|
|
|
|
|
# Load the train split via the local dataset script; negatives are mined here,
# so the loader is asked not to produce any (negative=False).
dataset = datasets.load_dataset("mcqa_light.py", language=args.language, negative=False, split="train")
if args.limit:
    # Debugging aid: keep only the first --limit examples.
    dataset = dataset.select(range(args.limit))
# Add an "embeddings" column, then index it with FAISS for nearest-neighbour search.
dataset = dataset.map(encode, batched=True, batch_size=1000, desc="encode")
dataset.add_faiss_index(column='embeddings')
|
|
|
|
|
def retrieve(batch):
    """For each question, collect the answers of its 5 nearest neighbours.

    Re-embeds the questions and queries the FAISS index built on the
    "embeddings" column. Returns a new "negative" column; each entry holds
    the "answer" field of the retrieved neighbours (presumably a list of 5
    answers per question, one per neighbour).
    """
    questions = batch["question"]
    queries = model.encode(questions, batch_size=len(questions))
    # get_nearest_examples_batch returns (scores, examples); scores are unused.
    _scores, neighbours = dataset.get_nearest_examples_batch('embeddings', queries, k=5)
    negatives = []
    for hit in neighbours:
        negatives.append(hit["answer"])
    return {"negative": negatives}
|
|
|
|
|
def filter_negative(example):
    """Drop any mined negative that is identical to the example's gold answer.

    Mutates and returns `example` (the shape `datasets.Dataset.map` expects).
    """
    gold = example["answer"]
    kept = []
    for candidate in example["negative"]:
        if candidate != gold:
            kept.append(candidate)
    example["negative"] = kept
    return example
|
|
|
|
|
# Mine negatives for every example, then drop the embedding column — it is no
# longer needed once retrieval is done (the FAISS index keeps its own copy).
dataset = dataset.map(retrieve, batched=True, batch_size=1000, desc="retrieve", remove_columns=["embeddings"])
# Remove retrieved negatives that coincide with the example's own gold answer.
dataset = dataset.map(filter_negative, desc="filter")
|
|
|
|
|
# Dump the mined dataset as JSONL, one example per line.
# Fixes: mode "w+" (read/write) replaced by plain "w" — nothing is read back —
# and an explicit encoding="utf-8" is set. With ensure_ascii=False the output
# contains raw non-ASCII text, which would crash on platforms whose default
# encoding cannot represent it (e.g. cp1252 on Windows).
with open(f"data/{args.language}.neg.json", "w", encoding="utf-8") as f:
    for element in tqdm.tqdm(dataset, desc="write"):
        f.write(json.dumps(element, ensure_ascii=False) + "\n")
|
|