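"""Hugging Face `datasets` loading script for the BordIRlines IR dataset."""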
import json
from copy import copy
from functools import lru_cache
import datasets
import pandas as pd
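# Query/config languages, mostly ISO 639-1 codes ("zhs"/"zht" appear to denote
# simplified/traditional Chinese; "iw" is the legacy code for Hebrew), plus a
# special "control" config.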
SUPPORTED_LANGUAGES = [
"sl",
"ur",
"sw",
"uz",
"vi",
"sq",
"ms",
"km",
"hy",
"da",
"ky",
"mg",
"mn",
"ja",
"el",
"it",
"is",
"ru",
"tl",
"so",
"pt",
"uk",
"sr",
"sn",
"ht",
"bs",
"my",
"ar",
"hr",
"nl",
"bn",
"ne",
"hi",
"ka",
"az",
"ko",
"id",
"fr",
"es",
"en",
"fa",
"lo",
"iw",
"th",
"tr",
"zht",
"zhs",
"ti",
"tg",
"control",
]
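# Retrieval systems, query-language modes, and filter options; systems and modes
# determine how the per-language hits files are laid out under ROOT_DIR.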
SYSTEMS = ["openai", "m3"]
MODES = ["qlang", "qlang_en", "en", "rel_langs"]
RELEVANCE_FILTERS = ["all", "relevant", "non-relevant"]
LLM_MODES = ["zeroshot", "fewshot"]
ROOT_DIR = "data"
class BordIRlinesConfig(datasets.BuilderConfig):
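    """Config carrying the query language and the number of top hits to keep per query."""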
def __init__(self, language, n_hits=10, **kwargs):
        super().__init__(**kwargs)
self.language = language
self.n_hits = n_hits
self.data_root_dir = ROOT_DIR
def load_json(path):
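    """Read a UTF-8 JSON file and return the parsed object."""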
with open(path, "r", encoding="utf-8") as f:
return json.load(f)
@lru_cache
def replace_lang_str(path, lang):
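    """Rewrite a `.../<lang>/<lang>_docs.json` path to point at another language's docs file."""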
parent = path.rsplit("/", 2)[0]
return f"{parent}/{lang}/{lang}_docs.json"
def get_label(human_bool, llm_bool, annotation_type):
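    """Pick the human or LLM label per annotation_type; by default, prefer human with an LLM fallback."""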
if annotation_type == "human":
return human_bool
elif annotation_type == "llm":
return llm_bool
else:
return human_bool if human_bool is not None else llm_bool
class BordIRLinesDataset(datasets.GeneratorBasedBuilder):
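    """Builder that yields ranked (query, document) hits joined with human and LLM annotations."""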
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
BordIRlinesConfig(
name=lang,
language=lang,
description=f"{lang.upper()} dataset",
)
for lang in SUPPORTED_LANGUAGES
]
def __init__(
self,
*args,
relevance_filter="all",
annotation_type=None,
llm_mode="fewshot",
viewpoint_filter=None,
**kwargs,
):
super().__init__(*args, **kwargs)
self.relevance_filter = relevance_filter
        assert self.relevance_filter in RELEVANCE_FILTERS, f"relevance_filter must be one of {RELEVANCE_FILTERS}"
self.annotation_type = annotation_type
self.llm_mode = llm_mode
        assert self.llm_mode in LLM_MODES, f"llm_mode must be one of {LLM_MODES}"
self.viewpoint_filter = viewpoint_filter # Filter for a specific viewpoint
def _info(self):
return datasets.DatasetInfo(
description="IR Dataset for BordIRLines paper.",
features=datasets.Features(
{
"query_id": datasets.Value("string"),
"query": datasets.Value("string"),
"query_lang": datasets.Value("string"),
"territory": datasets.Value("string"),
"rank": datasets.Value("int32"),
"score": datasets.Value("float32"),
"doc_id": datasets.Value("string"),
"doc_text": datasets.Value("string"),
"doc_lang": datasets.Value("string"),
"viewpoint_human": datasets.Value("string"),
"viewpoint_llm": datasets.Value("string"),
"relevant_human": datasets.Value("bool"),
"relevant_llm": datasets.Value("bool"),
}
),
)
def _split_generators(self, dl_manager):
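        """Create one split per (system, mode) pair, e.g. "openai.qlang"."""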
base_url = self.config.data_root_dir
queries_path = f"{base_url}/queries.tsv"
docs_path = dl_manager.download_and_extract(f"{base_url}/all_docs.json")
human_annotations_path = dl_manager.download_and_extract(
f"{base_url}/human_annotations.tsv"
)
llm_annotations_path = dl_manager.download_and_extract(f"{base_url}/llm_annotations.tsv")
lang = self.config.language
splits = []
downloaded_data = {}
for system in SYSTEMS:
for mode in MODES:
source = f"{system}.{mode}"
downloaded_data[source] = dl_manager.download_and_extract(
{
"hits": f"{base_url}/{lang}/{system}/{mode}/{lang}_query_hits.tsv",
"docs": docs_path,
"queries": queries_path,
"human_annotations": human_annotations_path,
"llm_annotations": llm_annotations_path,
}
)
split = datasets.SplitGenerator(
name=f"{system}.{mode}",
gen_kwargs={
"hits_path": downloaded_data[source]["hits"],
"docs_path": downloaded_data[source]["docs"],
"queries_path": downloaded_data[source]["queries"],
"human_annotations_path": downloaded_data[source]["human_annotations"],
"llm_annotations_path": downloaded_data[source]["llm_annotations"],
},
)
splits.append(split)
return splits
def _skip_viewpoint(self, viewpoint_human, viewpoint_llm, query_entry):
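        """Return True if this row's viewpoint should be skipped under viewpoint_filter."""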
viewpoint = get_label(viewpoint_human, viewpoint_llm, self.annotation_type)
if viewpoint is None:
return True
if self.viewpoint_filter == "Non-controllers":
controller = query_entry["Controller"]
if controller == "Unknown":
return True
            claimants = copy(query_entry["Claimants"])
            # discard() rather than remove(): avoids a KeyError if the controller
            # is not itself listed among the claimants
            claimants.discard(controller)
return (
not claimants or viewpoint not in claimants
) # skip if not a non-controller viewpoint
# otherwise, handle the case where we want to filter for a specific viewpoint
target_viewpoint = (
query_entry["Controller"]
if self.viewpoint_filter == "Controller"
else self.viewpoint_filter
)
return target_viewpoint and viewpoint != target_viewpoint
    def _skip_relevance(self, relevant_human, relevant_llm):
        # Filtering logic based on relevance preference.
        # If the filter is "all", target_relevant is None and nothing is skipped.
        relevant = get_label(relevant_human, relevant_llm, self.annotation_type)
        target_relevant = {"relevant": True, "non-relevant": False}.get(self.relevance_filter)
        return target_relevant is not None and relevant != target_relevant
def _generate_examples(
self, hits_path, docs_path, queries_path, human_annotations_path, llm_annotations_path
):
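        """Yield one example per retrieved hit, joined with its query, document text, and annotations."""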
n_hits = self.config.n_hits
queries_df = pd.read_csv(queries_path, sep="\t").set_index("query_id")
queries_df["Claimants"] = queries_df["Claimants"].str.split(";").map(set)
counter = 0
docs = load_json(docs_path)
hits = pd.read_csv(hits_path, sep="\t")
human_annotations = pd.read_csv(human_annotations_path, sep="\t")
llm_annotations = pd.read_csv(llm_annotations_path, sep="\t")
if n_hits:
hits = hits.groupby("query_id").head(n_hits)
# sort hits by query_id and rank
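        # query_id is assumed to look like "q<NUM>"; strip the prefix so the sort is numeric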
hits["query_id_int"] = hits["query_id"].str[1:].astype(int)
hits = hits.sort_values(by=["query_id_int", "rank"])
hits = hits.drop(columns=["query_id_int"])
human_map = human_annotations.set_index(["query_id", "doc_id"]).to_dict(orient="index")
llm_map = llm_annotations.set_index(["query_id", "doc_id"]).to_dict(orient="index")
for _, row in hits.iterrows():
doc_id = row["doc_id"]
doc_lang = row["doc_lang"]
query_id = row["query_id"]
query_entry = queries_df.loc[query_id]
query_text = query_entry["query_text"]
query_lang = query_entry["language"]
# Get Human Data
human_data = human_map.get((query_id, doc_id), {})
relevant_human = human_data.get("relevant", None)
viewpoint_human = human_data.get("territory", None)
# Get LLM Data
llm_data = llm_map.get((query_id, doc_id), {})
            # use .get() so a missing (query_id, doc_id) pair yields None rather than a KeyError
            relevant_llm = llm_data.get(f"relevant_{self.llm_mode}")
            viewpoint_llm = llm_data.get(f"territory_{self.llm_mode}")
            # Strip the leading option prefix (e.g. "(a) ") from the LLM viewpoint,
            # keeping only the territory name
            viewpoint_llm = viewpoint_llm.split(") ", 1)[-1] if not pd.isna(viewpoint_llm) else None
            # Filtering logic based on viewpoint preference
if self.viewpoint_filter:
do_skip = self._skip_viewpoint(viewpoint_human, viewpoint_llm, query_entry)
if do_skip:
continue
if self.relevance_filter != "all":
do_skip = self._skip_relevance(relevant_human, relevant_llm)
if do_skip:
continue
yield (
counter,
{
"query_id": query_id,
"query": query_text,
"query_lang": query_lang,
"territory": row["territory"],
"rank": row["rank"],
"score": row["score"],
"doc_id": doc_id,
"doc_text": docs[doc_lang][doc_id],
"doc_lang": doc_lang,
"viewpoint_human": viewpoint_human,
"viewpoint_llm": viewpoint_llm,
"relevant_human": relevant_human,
"relevant_llm": relevant_llm,
},
)
counter += 1
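# Example usage (a minimal sketch, assuming this script is loaded as a local
# dataset script with `trust_remote_code=True`; the config name and split below
# are illustrative, not the only valid choices):
#
#   import datasets
#
#   ds = datasets.load_dataset(
#       "bordirlines.py",             # path to this script
#       "en",                         # one of SUPPORTED_LANGUAGES
#       split="openai.qlang",         # one of f"{system}.{mode}"
#       trust_remote_code=True,
#       relevance_filter="relevant",  # forwarded to BordIRLinesDataset.__init__
#   )
#   print(ds[0]["query"], "->", ds[0]["doc_id"])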