File size: 1,611 Bytes
5b2804d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91ff4e0
5b2804d
 
 
 
 
 
 
 
 
 
 
 
 
2404fb0
5b2804d
 
 
91ff4e0
5b2804d
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import json
import tqdm
import torch
import numpy
import datasets
import argparse
import transformers
from sentence_transformers import SentenceTransformer


# CLI options: source language, optional row cap for quick runs, and the
# sentence-embedding model to use for retrieval.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--language")
arg_parser.add_argument("--limit", type=int)
arg_parser.add_argument("--model", default="paraphrase-multilingual-mpnet-base-v2")
args = arg_parser.parse_args()


# Inference-only script: disable autograd globally before loading the encoder.
torch.set_grad_enabled(False)
model = SentenceTransformer(args.model)


def encode(batch):
    """Embed every question in *batch* and return them as an ``embeddings`` column."""
    questions = batch["question"]
    # Encode the whole batch in one forward pass.
    vectors = model.encode(questions, batch_size=len(questions))
    return {"embeddings": vectors}


# Load the training split via the local loading script, optionally truncate it,
# embed every question, and index the embeddings with FAISS for k-NN search.
dataset = datasets.load_dataset(
    "mcqa_light.py", language=args.language, negative=False, split="train"
)
if args.limit:
    dataset = dataset.select(range(args.limit))
dataset = dataset.map(encode, batched=True, batch_size=1000, desc="encode")
dataset.add_faiss_index(column="embeddings")


def retrieve(batch):
    """Fetch the answers of the 5 nearest questions as candidate negatives.

    Queries the FAISS index built over the same dataset, so each example's
    own answer may appear among its neighbours (filtered out later).
    """
    queries = batch["question"]
    query_vectors = model.encode(queries, batch_size=len(queries))
    _, neighbors = dataset.get_nearest_examples_batch("embeddings", query_vectors, k=5)
    # Each neighbour entry is a dict of columns; keep only the answers.
    return {"negative": [hit["answer"] for hit in neighbors]}


def filter_negative(example):
    """Drop any retrieved negative identical to the example's own answer (in place)."""
    own_answer = example["answer"]
    example["negative"] = [cand for cand in example["negative"] if cand != own_answer]
    return example


# Swap the embeddings column for retrieved hard negatives, then remove any
# negative that matches the example's gold answer.
dataset = dataset.map(retrieve, batched=True, batch_size=1000, desc="retrieve", remove_columns=["embeddings"])
dataset = dataset.map(filter_negative, desc="filter")


# Write one JSON object per line (JSONL). Mode "w" (was "w+": the read
# capability was never used) and explicit UTF-8: ensure_ascii=False emits raw
# non-ASCII characters, which would crash under a default locale encoding
# (e.g. cp1252 on Windows) that cannot represent them.
with open(f"data/{args.language}.neg.json", "w", encoding="utf-8") as f:
    for element in tqdm.tqdm(dataset, desc="write"):
        f.write(json.dumps(element, ensure_ascii=False) + "\n")