|
import json |
|
from functools import lru_cache |
|
|
|
import datasets |
|
import pandas as pd |
|
|
|
# Language codes for which a data/<lang>/ subset exists, plus a "control"
# pseudo-language. Each entry becomes one BuilderConfig below.
# NOTE(review): "iw" is the legacy code for Hebrew and "zht"/"zhs" appear to
# mean traditional/simplified Chinese — confirm they match the directory
# names under ROOT_DIR.
SUPPORTED_LANGUAGES = [
    "sl",
    "ur",
    "sw",
    "uz",
    "vi",
    "sq",
    "ms",
    "km",
    "hy",
    "da",
    "ky",
    "mg",
    "mn",
    "ja",
    "el",
    "it",
    "is",
    "ru",
    "tl",
    "so",
    "pt",
    "uk",
    "sr",
    "sn",
    "ht",
    "bs",
    "my",
    "ar",
    "hr",
    "nl",
    "bn",
    "ne",
    "hi",
    "ka",
    "az",
    "ko",
    "id",
    "fr",
    "es",
    "en",
    "fa",
    "lo",
    "iw",
    "th",
    "tr",
    "zht",
    "zhs",
    "ti",
    "tg",
    "control",
]

# Retrieval systems; used as a path component in
# data/<lang>/<system>/<mode>/<lang>_query_hits.tsv (see _split_generators).
SYSTEMS = ["openai", "m3"]

# Retrieval modes; second path component under each system directory.
MODES = ["qlang", "qlang_en", "en", "rel_langs"]


# Root directory containing queries.tsv, all_docs.json, the annotation TSVs
# and the per-language hit files.
ROOT_DIR = "data"
|
|
|
|
|
class BordIRlinesConfig(datasets.BuilderConfig):
    """BuilderConfig for one BordIRLines language subset.

    Args:
        language: Language code selecting the ``data/<language>/`` subset.
        n_hits: Number of top-ranked hits kept per query by
            ``_generate_examples`` (a falsy value keeps all hits).
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (``name``,
            ``description``, ...).
    """

    def __init__(self, language, n_hits=10, **kwargs):
        # Zero-argument super() — same behavior as the legacy
        # super(BordIRlinesConfig, self) form, without repeating the class name.
        super().__init__(**kwargs)
        self.language = language
        self.n_hits = n_hits
        # Kept on the config so the builder reads paths from self.config only.
        self.data_root_dir = ROOT_DIR
|
|
|
|
|
def load_json(path):
    """Parse the UTF-8 encoded JSON file at *path* and return the result."""
    with open(path, encoding="utf-8") as handle:
        return json.load(handle)
|
|
|
|
|
@lru_cache
def replace_lang_str(path, lang):
    """Rewrite *path* so its last two components become ``<lang>/<lang>_docs.json``.

    The result is memoized, so repeated (path, lang) pairs are computed once.
    """
    base = path.rsplit("/", 2)[0]
    return "/".join((base, lang, f"{lang}_docs.json"))
|
|
|
|
|
class BordIRLinesDataset(datasets.GeneratorBasedBuilder):
    """Builder for the BordIRLines IR dataset.

    For the configured language it exposes one split per (system, mode) pair —
    e.g. ``openai.qlang`` — joining per-query retrieval hits with document
    texts and with human/LLM relevance annotations.
    """

    VERSION = datasets.Version("1.0.0")

    # One config per language; the config name doubles as the language code.
    BUILDER_CONFIGS = [
        BordIRlinesConfig(
            name=lang,
            language=lang,
            description=f"{lang.upper()} dataset",
        )
        for lang in SUPPORTED_LANGUAGES
    ]

    def __init__(self, *args, relevance_filter="all", annotation_type=None, llm_mode="fewshot", viewpoint_filter=None, **kwargs):
        """Extend the stock builder with row-level filtering options.

        Args:
            relevance_filter: "all" (default, no filtering), "relevant", or
                "non-relevant"; applied per row in ``_generate_examples``.
            annotation_type: "human" or "llm" to filter on a single
                annotation source; ``None`` consults both sources.
            llm_mode: "fewshot" (default) selects the ``relevant_fewshot``
                column for filtering, any other value selects
                ``relevant_zeroshot``.
            viewpoint_filter: if set, only rows whose human-annotated
                territory string contains this substring are yielded.
        """
        super().__init__(*args, **kwargs)
        self.relevance_filter = relevance_filter
        self.annotation_type = annotation_type
        self.llm_mode = llm_mode
        self.viewpoint_filter = viewpoint_filter

    def _info(self):
        """Declare the flat per-hit schema produced by ``_generate_examples``."""
        return datasets.DatasetInfo(
            description="IR Dataset for BordIRLines paper.",
            features=datasets.Features(
                {
                    "query_id": datasets.Value("string"),
                    "query": datasets.Value("string"),
                    "query_lang": datasets.Value("string"),
                    "territory": datasets.Value("string"),
                    "rank": datasets.Value("int32"),
                    "score": datasets.Value("float32"),
                    "doc_id": datasets.Value("string"),
                    "doc_text": datasets.Value("string"),
                    "doc_lang": datasets.Value("string"),
                    "relevant_human": datasets.Value("bool"),
                    "viewpoint": datasets.Value("string"),
                    "relevant_llm_zeroshot": datasets.Value("bool"),
                    "relevant_llm_fewshot": datasets.Value("bool"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        """Download shared files once, then emit one split per (system, mode)."""
        base_url = self.config.data_root_dir
        queries_path = f"{base_url}/queries.tsv"
        # Files shared by every split are fetched up front; the per-split
        # download_and_extract calls below receive these already-resolved paths.
        docs_path = dl_manager.download_and_extract(f"{base_url}/all_docs.json")
        human_annotations_path = dl_manager.download_and_extract(f"{base_url}/human_annotations.tsv")
        llm_annotations_path = dl_manager.download_and_extract(f"{base_url}/llm_annotations.tsv")

        lang = self.config.language

        splits = []
        downloaded_data = {}

        for system in SYSTEMS:
            for mode in MODES:
                # Split name, e.g. "openai.qlang"; also keys downloaded_data.
                source = f"{system}.{mode}"
                downloaded_data[source] = dl_manager.download_and_extract(
                    {
                        # Only the hits file varies per (system, mode).
                        "hits": f"{base_url}/{lang}/{system}/{mode}/{lang}_query_hits.tsv",
                        "docs": docs_path,
                        "queries": queries_path,
                        "human_annotations": human_annotations_path,
                        "llm_annotations": llm_annotations_path,
                    }
                )

                split = datasets.SplitGenerator(
                    name=f"{system}.{mode}",
                    gen_kwargs={
                        "hits_path": downloaded_data[source]["hits"],
                        "docs_path": downloaded_data[source]["docs"],
                        "queries_path": downloaded_data[source]["queries"],
                        "human_annotations_path": downloaded_data[source]["human_annotations"],
                        "llm_annotations_path": downloaded_data[source]["llm_annotations"],
                    },
                )
                splits.append(split)

        return splits

    def _generate_examples(self, hits_path, docs_path, queries_path, human_annotations_path, llm_annotations_path):
        """Yield (key, example) pairs: one example per retained hit row.

        Joins the hits TSV with query text/language, document text, and the
        human/LLM annotation tables, then applies the viewpoint and relevance
        filters configured in ``__init__``.
        """
        n_hits = self.config.n_hits
        # queries.tsv is expected to have query_id, query_text and language
        # columns — TODO confirm against the published file.
        queries_df = pd.read_csv(queries_path, sep="\t")
        query_map = dict(zip(queries_df["query_id"], queries_df["query_text"]))
        query_to_lang_map = dict(zip(queries_df["query_id"], queries_df["language"]))
        counter = 0

        # all_docs.json maps doc_lang -> doc_id -> doc text (see the yield below).
        docs = load_json(docs_path)

        hits = pd.read_csv(hits_path, sep="\t")
        human_annotations = pd.read_csv(human_annotations_path, sep="\t")
        llm_annotations = pd.read_csv(llm_annotations_path, sep="\t")

        if n_hits:
            # Keep only the first n_hits rows per query (file order).
            hits = hits.groupby("query_id").head(n_hits)

        # Sort numerically by query id then rank. Assumes query ids are a
        # one-character prefix followed by digits (e.g. "q12") — TODO confirm.
        hits["query_id_int"] = hits["query_id"].str[1:].astype(int)
        hits = hits.sort_values(by=["query_id_int", "rank"])
        hits = hits.drop(columns=["query_id_int"])

        # (query_id, doc_id) -> row dict; duplicate pairs keep the last row.
        human_map = human_annotations.set_index(["query_id", "doc_id"]).to_dict(orient="index")
        llm_map = llm_annotations.set_index(["query_id", "doc_id"]).to_dict(orient="index")

        for _, row in hits.iterrows():
            doc_id = row["doc_id"]
            doc_lang = row["doc_lang"]
            query_id = row["query_id"]
            query_text = query_map[query_id]
            query_lang = query_to_lang_map[query_id]

            # Unannotated pairs fall back to not-relevant / empty territory.
            human_data = human_map.get((query_id, doc_id), {})

            relevant_human = human_data.get("relevant", False)
            viewpoint_human = human_data.get("territory", "")

            llm_data = llm_map.get((query_id, doc_id), {})
            # llm_mode picks which LLM column drives the relevance filter;
            # both columns are still emitted verbatim in the example.
            relevant_llm = (
                llm_data.get("relevant_fewshot", None)
                if self.llm_mode == "fewshot"
                else llm_data.get("relevant_zeroshot", None)
            )
            viewpoint = viewpoint_human
            # Substring match against the human-annotated territory.
            if self.viewpoint_filter and self.viewpoint_filter not in viewpoint:
                continue

            if self.relevance_filter == "relevant":
                # annotation_type narrows to one source; with neither set,
                # a row passes if EITHER source marks it relevant.
                if self.annotation_type == "human" and not relevant_human:
                    continue
                elif self.annotation_type == "llm" and not (relevant_llm is True):
                    continue
                elif not relevant_human and not (relevant_llm is True):
                    continue

            elif self.relevance_filter == "non-relevant":
                # Mirror image of the branch above: drop rows the selected
                # source(s) mark as relevant.
                if self.annotation_type == "human" and relevant_human:
                    continue
                elif self.annotation_type == "llm" and relevant_llm is True:
                    continue
                elif relevant_human or relevant_llm is True:
                    continue

            yield (
                counter,
                {
                    "query_id": query_id,
                    "query": query_text,
                    "query_lang": query_lang,
                    "territory": row["territory"],
                    "rank": row["rank"],
                    "score": row["score"],
                    "doc_id": doc_id,
                    "doc_text": docs[doc_lang][doc_id],
                    "doc_lang": doc_lang,
                    "relevant_human": relevant_human,
                    "viewpoint": viewpoint,
                    "relevant_llm_zeroshot": llm_data.get("relevant_zeroshot", None),
                    "relevant_llm_fewshot": llm_data.get("relevant_fewshot", None),
                },
            )
            counter += 1
|
|