"""Hugging Face loading script for the BordIRLines IR dataset."""

import json
from functools import lru_cache

import datasets
import pandas as pd

SUPPORTED_LANGUAGES = [
    "sl", "ur", "sw", "uz", "vi", "sq", "ms", "km", "hy", "da",
    "ky", "mg", "mn", "ja", "el", "it", "is", "ru", "tl", "so",
    "pt", "uk", "sr", "sn", "ht", "bs", "my", "ar", "hr", "nl",
    "bn", "ne", "hi", "ka", "az", "ko", "id", "fr", "es", "en",
    "fa", "lo", "iw", "th", "tr", "zht", "zhs", "ti", "tg",
    "control",
]

SYSTEMS = ["openai", "m3"]
MODES = ["qlang", "qlang_en", "en", "rel_langs"]

# get combination of systems and supported modes
# SUPPORTED_SOURCES = [f"{system}.{mode}" for system in SYSTEMS for mode in MODES]

ROOT_DIR = "data"
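
# Expected layout under ROOT_DIR, reconstructed from the paths requested in
# _split_generators below:
#
#   data/
#     queries.tsv
#     all_docs.json
#     human_annotations.tsv
#     llm_annotations.tsv
#     {lang}/{system}/{mode}/{lang}_query_hits.tsv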


class BordIRlinesConfig(datasets.BuilderConfig):
    def __init__(self, language, n_hits=10, **kwargs):
        super().__init__(**kwargs)
        self.language = language
        self.n_hits = n_hits
        self.data_root_dir = ROOT_DIR


def load_json(path):
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


@lru_cache
def replace_lang_str(path, lang):
    parent = path.rsplit("/", 2)[0]
    return f"{parent}/{lang}/{lang}_docs.json"


class BordIRLinesDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BordIRlinesConfig(
            name=lang,
            language=lang,
            description=f"{lang.upper()} dataset",
        )
        for lang in SUPPORTED_LANGUAGES
    ]

    def __init__(self, *args, relevant_only=False, annotation_type=None, llm_mode="fewshot", **kwargs):
        super().__init__(*args, **kwargs)
        self.relevant_only = relevant_only
        # "human", "llm", or None; consulted by the relevant_only filter in _generate_examples.
        self.annotation_type = annotation_type
        self.llm_mode = llm_mode  # Choose between "zeroshot" and "fewshot". Default: "fewshot".

    def _info(self):
        return datasets.DatasetInfo(
            description="IR dataset for the BordIRLines paper.",
            features=datasets.Features(
                {
                    "query_id": datasets.Value("string"),
                    "query": datasets.Value("string"),
                    "query_lang": datasets.Value("string"),
                    "territory": datasets.Value("string"),
                    "rank": datasets.Value("int32"),
                    "score": datasets.Value("float32"),
                    "doc_id": datasets.Value("string"),
                    "doc_text": datasets.Value("string"),
                    "doc_lang": datasets.Value("string"),
                    "relevant_human": datasets.Value("bool"),
                    "territory_human": datasets.Sequence(datasets.Value("string")),
                    "relevant_llm_zeroshot": datasets.Value("bool"),
                    "relevant_llm_fewshot": datasets.Value("bool"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        base_url = self.config.data_root_dir
        queries_path = f"{base_url}/queries.tsv"
        docs_path = dl_manager.download_and_extract(f"{base_url}/all_docs.json")
        human_annotations_path = dl_manager.download_and_extract(f"{base_url}/human_annotations.tsv")
        llm_annotations_path = dl_manager.download_and_extract(f"{base_url}/llm_annotations.tsv")
        lang = self.config.language
        splits = []
        downloaded_data = {}

        for system in SYSTEMS:
            for mode in MODES:
                source = f"{system}.{mode}"
                downloaded_data[source] = dl_manager.download_and_extract(
                    {
                        "hits": f"{base_url}/{lang}/{system}/{mode}/{lang}_query_hits.tsv",
                        "docs": docs_path,
                        "queries": queries_path,
                        "human_annotations": human_annotations_path,
                        "llm_annotations": llm_annotations_path,
                    }
                )
                split = datasets.SplitGenerator(
                    name=source,
                    gen_kwargs={
                        "hits_path": downloaded_data[source]["hits"],
                        "docs_path": downloaded_data[source]["docs"],
                        "queries_path": downloaded_data[source]["queries"],
                        "human_annotations_path": downloaded_data[source]["human_annotations"],
                        "llm_annotations_path": downloaded_data[source]["llm_annotations"],
                    },
                )
                splits.append(split)

        return splits
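
    # Columns of {lang}_query_hits.tsv as consumed by _generate_examples below
    # (column order and values in this illustrative row are hypothetical):
    #   query_id  territory  rank  score  doc_id  doc_lang
    #   Q1        ...        1     12.3   D42     en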

    def _generate_examples(self, hits_path, docs_path, queries_path, human_annotations_path, llm_annotations_path):
        n_hits = self.config.n_hits
        queries_df = pd.read_csv(queries_path, sep="\t")
        query_map = dict(zip(queries_df["query_id"], queries_df["query_text"]))
        query_to_lang_map = dict(zip(queries_df["query_id"], queries_df["language"]))

        counter = 0
        docs = load_json(docs_path)
        hits = pd.read_csv(hits_path, sep="\t")
        human_annotations = pd.read_csv(human_annotations_path, sep="\t")
        llm_annotations = pd.read_csv(llm_annotations_path, sep="\t")

        if n_hits:
            hits = hits.groupby("query_id").head(n_hits)

        # Sort hits numerically by query_id (so e.g. "Q10" comes after "Q2"), then by rank.
        hits["query_id_int"] = hits["query_id"].str[1:].astype(int)
        hits = hits.sort_values(by=["query_id_int", "rank"])
        hits = hits.drop(columns=["query_id_int"])

        human_map = human_annotations.set_index(["query_id", "doc_id"]).to_dict(orient="index")
        llm_map = llm_annotations.set_index(["query_id", "doc_id"]).to_dict(orient="index")

        for _, row in hits.iterrows():
            doc_id = row["doc_id"]
            doc_lang = row["doc_lang"]
            query_id = row["query_id"]
            query_text = query_map[query_id]
            query_lang = query_to_lang_map[query_id]

            # Human annotations, if any, for this (query, doc) pair.
            human_data = human_map.get((query_id, doc_id), {})

            # Parse the stringified vote list, e.g. "[True, False, True]".
            raw_votes = human_data.get("relevant_human", "[]")
            relevant_human_votes = [
                v.strip() == "True"
                for v in raw_votes.strip("[]").split(",")
                if v.strip()
            ]

            # Parse the stringified territory list, e.g. "['US', 'CA']".
            raw_territories = human_data.get("territory_human", "[]")
            territory_human = [
                v.strip().strip("'").strip('"')  # remove surrounding quotes and whitespace
                for v in raw_territories.strip("[]").split(",")
                if v.strip()
            ]

            # A document counts as relevant only on a strict majority of human votes.
            majority_relevant_human = (
                sum(relevant_human_votes) > len(relevant_human_votes) / 2 if relevant_human_votes else False
            )
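            # e.g. votes [True, True, False] -> 2 > 1.5 -> relevant, while an
            # even split [True, False] -> 1 > 1.0 is False -> not relevant.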

            # LLM annotation in the configured prompting mode.
            llm_data = llm_map.get((query_id, doc_id), {})
            relevant_llm = (
                llm_data.get("relevant_fewshot", None)
                if self.llm_mode == "fewshot"
                else llm_data.get("relevant_zeroshot", None)
            )

            # Optionally keep only relevant hits, judged by the requested
            # annotation source ("human", "llm", or either one when unset).
            if self.relevant_only:
                if self.annotation_type == "human" and not majority_relevant_human:
                    continue
                elif self.annotation_type == "llm" and not (relevant_llm is True):
                    continue
                elif not majority_relevant_human and not (relevant_llm is True):
                    continue

            yield (
                counter,
                {
                    "query_id": query_id,
                    "query": query_text,
                    "query_lang": query_lang,
                    "territory": row["territory"],
                    "rank": row["rank"],
                    "score": row["score"],
                    "doc_id": doc_id,
                    "doc_text": docs[doc_lang][doc_id],
                    "doc_lang": doc_lang,
                    "relevant_human": majority_relevant_human,
                    "territory_human": territory_human,
                    "relevant_llm_zeroshot": llm_data.get("relevant_zeroshot", None),
                    "relevant_llm_fewshot": llm_data.get("relevant_fewshot", None),
                },
            )
            counter += 1
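

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loader). It assumes the
    # data/ directory sits alongside this script; recent versions of the
    # datasets library also require trust_remote_code=True to execute a
    # loading script. Config name and split are illustrative picks from
    # SUPPORTED_LANGUAGES and SYSTEMS x MODES.
    from datasets import load_dataset

    ds = load_dataset(
        __file__,              # path to this loading script
        "en",                  # config name: one of SUPPORTED_LANGUAGES
        split="openai.qlang",  # f"{system}.{mode}"
        trust_remote_code=True,
        relevant_only=False,
        llm_mode="fewshot",
    )
    print(ds[0])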