Tasks: Text Classification
Modalities: Text
Formats: csv
Languages: Japanese
Size: 10K - 100K
ArXiv: 2408.05906
License:
import datasets
_CITATION = """
@inproceedings{zhang2025adtec,
title={{AdTEC}: A Unified Benchmark for Evaluating Text Quality in Search Engine Advertising},
author={Peinan Zhang and Yusuke Sakai and Masato Mita and Hiroki Ouchi and Taro Watanabe},
booktitle={Proceedings of the 2025 Annual Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics (NAACL)},
year={2025},
publisher={Association for Computational Linguistics},
eprint={2408.05906},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2408.05906},
}
"""
_DESCRIPTION = """
AdTEC is a Japanese dataset designed to evaluate the quality of ad texts from multiple aspects, considering practical advertising operations. The dataset contains only Japanese text and covers the online advertisement (search engine advertising) domain. It consists of the following tasks:
1. Ad Acceptability: Given a text, predict the acceptance of overall quality with binary labels: acceptable/unacceptable.
2. Ad Consistency: Given a pair of ad text and landing page (LP) text, predict the consistency between the ad and LP text with binary labels: consistent/inconsistent.
3. Ad Similarity: Given a pair of ad texts, predict their similarity with a score ranging from 1 to 5.
4. A3 Recognition: Given a text, predict all possible aspects of advertising appeals (A3) as multi-labels. The comprehensive list of A3 can be found in Murakami et al., 2022.
Each task is provided in TSV format and split into train, dev, and test sets. The dataset was created by Peinan Zhang, Yusuke Sakai, Masato Mita, Hiroki Ouchi, and Taro Watanabe. For more details, see: https://arxiv.org/abs/2408.05906
"""
class AdtecConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
class Adtec(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        AdtecConfig(
            name="ad-acceptability",
            description="Acceptability classification of ad titles (acceptable/unacceptable).",
            version=datasets.Version("1.0.0"),
        ),
        AdtecConfig(
            name="ad-consistency",
            description="Consistency classification between ad titles and landing page texts (consistent/inconsistent).",
            version=datasets.Version("1.0.0"),
        ),
        AdtecConfig(
            name="ad-similarity",
            description="Semantic similarity regression between pairs of ad texts (score: 1.0-5.0).",
            version=datasets.Version("1.0.0"),
        ),
        AdtecConfig(
            name="a3-recognition",
            description="Multi-label recognition of elements in ad titles (e.g., product features, offer, etc.).",
            version=datasets.Version("1.0.0"),
        ),
    ]
    def _info(self):
        if self.config.name == "ad-acceptability":
            features = datasets.Features(
                {
                    "label": datasets.ClassLabel(names=["acceptable", "unacceptable"]),
                    "title": datasets.Value("string"),
                }
            )
        elif self.config.name == "ad-consistency":
            features = datasets.Features(
                {
                    "label": datasets.ClassLabel(names=["consistent", "inconsistent"]),
                    "lp_text": datasets.Value("string"),
                    "title": datasets.Value("string"),
                }
            )
        elif self.config.name == "ad-similarity":
            features = datasets.Features(
                {
                    "text1": datasets.Value("string"),
                    "text2": datasets.Value("string"),
                    "score": datasets.Value("float32"),
                }
            )
        elif self.config.name == "a3-recognition":
            features = datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "labels": datasets.Sequence(datasets.Value("string")),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            citation=_CITATION,
        )
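
    # Note: for the classification configs, _generate_examples yields label values as
    # strings (e.g. "acceptable"); the ClassLabel features above encode them to integer
    # ids when the dataset is built.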
    def _split_generators(self, dl_manager):
        data_dir = "data/" + self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{data_dir}/train.tsv"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": f"{data_dir}/valid.tsv"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": f"{data_dir}/test.tsv"},
            ),
        ]
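
    # Assumed data layout (inferred from _split_generators above): the TSV files live at
    # data/<config-name>/{train,valid,test}.tsv relative to the dataset repository root.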
    def _generate_examples(self, filepath):
        if self.config.name == "ad-acceptability":
            # TSV: label, title
            with open(filepath, encoding="utf-8") as f:
                next(f)  # skip header
                for idx, line in enumerate(f):
                    label, title = line.strip().split("\t")
                    yield idx, {"label": label, "title": title}
        elif self.config.name == "ad-consistency":
            # TSV: label, lp_text, title
            with open(filepath, encoding="utf-8") as f:
                next(f)  # skip header
                for idx, line in enumerate(f):
                    label, lp_text, title = line.strip().split("\t")
                    yield idx, {"label": label, "lp_text": lp_text, "title": title}
        elif self.config.name == "ad-similarity":
            # TSV: text1, text2, score
            with open(filepath, encoding="utf-8") as f:
                next(f)  # skip header
                for idx, line in enumerate(f):
                    text1, text2, score = line.strip().split("\t")
                    yield idx, {"text1": text1, "text2": text2, "score": float(score)}
        elif self.config.name == "a3-recognition":
            # TSV: title, labels (labels are "|"-separated)
            with open(filepath, encoding="utf-8") as f:
                next(f)  # skip header
                for idx, line in enumerate(f):
                    title, labels = line.strip("\n ").split("\t")
                    label_list = labels.split("|")
                    yield idx, {"title": title, "labels": label_list}