""" Zero-zero_scrolls benchmark metric. """

from collections import defaultdict
from copy import deepcopy
import datasets

# fmt: off
from .rouge import compute_rouge, postprocess_text as rouge_postprocess_text  # From: https://huggingface.co/datasets/tau/zero_scrolls/raw/main/metrics/rouge.py
from .accuracy import compute_accuracy  # From: https://huggingface.co/datasets/tau/zero_scrolls/raw/main/metrics/accuracy.py
from .f1 import compute_f1  # From: https://huggingface.co/datasets/tau/zero_scrolls/raw/main/metrics/f1.py
from .exp_similarity import compute_exp_similarity  # From: https://huggingface.co/datasets/tau/zero_scrolls/raw/main/metrics/exp_similarity.py
from .concordance_index import compute_concordance_index  # From: https://huggingface.co/datasets/tau/zero_scrolls/raw/main/metrics/concordance_index.py

# fmt: on

_CITATION = """
"""

_DESCRIPTION = """
ZeroSCROLLS: Zero-Shot CompaRison Over Long Language Sequences.
A zero-shot benchmark for long text reasoning.
https://zero.scrolls-benchmark.com/
"""

_KWARGS_DESCRIPTION = """
Compute the zero_scrolls evaluation metric associated with each zero_scrolls dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be a string.
    references: list of lists of references for each example.
        Each reference should be a string.
Returns: depending on the zero_scrolls subset, one or several of:
    "accuracy": Accuracy score
    "f1": F1 score
    "rouge": ROUGE score
    "exp_similarity": Exponential Similarity score
    "concordance_index": Concordance Index score

Use the following code to download the metric:
```
import os, shutil
from huggingface_hub import hf_hub_download
def download_metric():
    zero_scrolls_metric_path = hf_hub_download(repo_id="tau/zero_scrolls", repo_type="dataset", filename="metrics/zero_scrolls.py")
    updated_zero_scrolls_metric_path = (
        os.path.dirname(zero_scrolls_metric_path) + os.path.basename(zero_scrolls_metric_path).replace(".", "_") + ".py"
    )
    shutil.copy(zero_scrolls_metric_path, updated_zero_scrolls_metric_path)
    return updated_zero_scrolls_metric_path

zero_scrolls_metric_path = download_metric()
```
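
The returned path can then be passed to datasets.load_metric together with a configuration
name (e.g. 'gov_report'), as shown in the examples below.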

Examples:
    
    >>> predictions = ["exact match example", "hello there", "general kenobi"]  # List[str]
    >>> references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]]  # List[List[str]]
    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'gov_report')  # "gov_report" or "summ_screen_fd" or "qmsum" or "squality"
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'rouge/rouge1': 72.2222, 'rouge/rouge2': 33.3333, 'rouge/rougeL': 72.2222, 'rouge/rougeLsum': 72.2222, 'rouge/geometric_mean': 55.8136, 
    'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'zero_scrolls_score': 55.8136, 
    'display_keys': ['rouge/rouge1', 'rouge/rouge2', 'rouge/rougeL'], 'display': [72.2222, 33.3333, 72.2222]}
    
    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'narrative_qa')  # "qasper" or "narrative_qa" or "musique"
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'f1': 72.2222, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'zero_scrolls_score': 72.2222, 
    'display_keys': ['f1'], 'display': [72.2222]}
    
    >>> predictions = ["The answer is (B)", "D", "A"]  # List[str]
    >>> references = [["B"], ["C"], ["C"]]  # List[List[str]]
    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'quality')
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 33.3333, 'num_predicted': 3, 'mean_prediction_length_characters': 6.3333, 'zero_scrolls_score': 33.3333, 'display_keys': ['accuracy'], 'display': [33.3333]}
    
    >>> predictions = ["Answer: 4,1,2,3", "2,4,5,4,1"]  # List[str]
    >>> references = [["1,2,3,4"], ["5,3,2,1,4"]]  # List[List[str]]
    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'book_sum_sort')
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'concordance_index': 25.0, 'num_predicted': 2, 'mean_prediction_length_characters': 12.0, 'zero_scrolls_score': 25.0, 'display_keys': ['concordance_index'], 'display': [25.0]}
    
    >>> predictions = ["There are 30% positive reviews", "25%"]  # List[str]
    >>> references = [["40%"], ["82%"]]  # List[List[str]]
    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'space_digest')
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exp_similarity': 25.9618, 'num_predicted': 2, 'mean_prediction_length_characters': 16.5, 'zero_scrolls_score': 25.9618, 'display_keys': ['exp_similarity'], 'display': [25.9618]}
"""

DATASET_TO_METRICS = {
    "gov_report": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "narrative_qa": {
        "metrics_to_compute": ["f1"],
        "zero_scrolls_score_key": "f1",
        "display_keys": ["f1"],
    },
    "qasper": {
        "metrics_to_compute": ["f1"],
        "zero_scrolls_score_key": "f1",
        "display_keys": ["f1"],
    },
    "qmsum": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "summ_screen_fd": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "quality": {
        "metrics_to_compute": ["accuracy"],
        "zero_scrolls_score_key": "accuracy",
        "display_keys": ["accuracy"],
    },
    "quality_hard": {
        "metrics_to_compute": ["accuracy"],
        "zero_scrolls_score_key": None,
        "display_keys": ["accuracy"],
    },
    "squality": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "musique": {
        "metrics_to_compute": ["f1"],
        "zero_scrolls_score_key": "f1",
        "display_keys": ["f1"],
    },
    "space_digest": {
        "metrics_to_compute": ["exp_similarity"],
        "zero_scrolls_score_key": "exp_similarity",
        "display_keys": ["exp_similarity"],
    },
    "book_sum_sort": {
        "metrics_to_compute": ["concordance_index"],
        "zero_scrolls_score_key": "concordance_index",
        "display_keys": ["concordance_index"],
    },
}
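
# Illustrative lookup (a sketch, not executed by this module): the configuration name
# passed to datasets.load_metric selects an entry of DATASET_TO_METRICS, which drives
# which metric functions run and which keys are reported, e.g.
#
#   DATASET_TO_METRICS["gov_report"]["metrics_to_compute"]      # ["rouge"]
#   DATASET_TO_METRICS["gov_report"]["zero_scrolls_score_key"]  # "rouge/geometric_mean"
#   DATASET_TO_METRICS["gov_report"]["display_keys"]            # ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"]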


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ZeroScrolls(datasets.Metric):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._compute_helper_kwargs_fn = {
            "rouge": lambda: {
                "metric_fn": compute_rouge,
                "agg_fn": max,
                "metric_fn_kwargs": {"use_stemmer": False},
                "metric_returns_per_example": True,
                "transform_single_input_fn": lambda text: rouge_postprocess_text(text),
                "transform_result_fn": lambda output: {
                    key: (value[0] if isinstance(value, list) else value).fmeasure * 100
                    for key, value in output.items()
                },
                "transform_aggregated_result_fn": lambda output: output.update(
                    {"geometric_mean": (output["rouge1"] * output["rouge2"] * output["rougeL"]) ** (1.0 / 3.0)}
                )
                                                                 or output,
            },
            "accuracy": lambda: {
                "metric_fn": compute_accuracy,
                "agg_fn": None,  # compute_accuracy already takes max
                "transform_result_fn": lambda output: {None: output},
            },
            "f1": lambda: {
                "metric_fn": compute_f1,
                "agg_fn": None,  # compute_f1 already takes max
                "transform_result_fn": lambda output: {None: output},
            },
            "exp_similarity": lambda: {
                "metric_fn": compute_exp_similarity,
                "agg_fn": None,  # compute_exp_similarity already takes max
                "transform_result_fn": lambda output: {None: output},
            },
            "concordance_index": lambda: {
                "metric_fn": compute_concordance_index,
                "agg_fn": None,  # compute_concordance_index already takes max
                "transform_result_fn": lambda output: {None: output},
            },
        }
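        # Each entry above lazily builds the keyword arguments that _compute() forwards
        # to _compute_helper(); see _compute_helper below for what metric_fn, agg_fn and
        # the transform_* hooks control.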

        custom_metrics = (
            [metric for metric in self.config_name.split(",") if len(metric) > 0]
            if self.config_name.startswith(",")
            else None
        )
        if custom_metrics is not None:
            for metric in custom_metrics:
                if metric not in self._compute_helper_kwargs_fn:
                    raise KeyError(
                        f"You should supply a metric name selected in {list(self._compute_helper_kwargs_fn.keys())}"
                    )
            self._metrics_to_compute = custom_metrics
        else:
            if self.config_name not in DATASET_TO_METRICS:
                raise KeyError(f"You should supply a configuration name selected in {list(DATASET_TO_METRICS.keys())}")
            self._metrics_to_compute = DATASET_TO_METRICS[self.config_name]["metrics_to_compute"]

    def _info(self):
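        """Return the MetricInfo describing this metric and its expected prediction/reference features."""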
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
        )

    def convert_from_map_format(self, id_to_pred, id_to_labels):
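        """Convert id-keyed maps into the flat format expected by compute().

        A minimal usage sketch (the ids, strings, and the `metric` variable are
        illustrative, not part of this module):

            id_to_pred = {"ex1": "hello there", "ex2": "general kenobi"}
            id_to_labels = {"ex1": ["hello", "hi there"], "ex2": ["commander kenobi"]}
            inputs = metric.convert_from_map_format(id_to_pred, id_to_labels)
            results = metric.compute(**inputs)
        """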
        index_to_id = list(id_to_pred.keys())
        predictions = [id_to_pred[id_] for id_ in index_to_id]
        references = [id_to_labels[id_] for id_ in index_to_id]
        return {"predictions": predictions, "references": references}

    def _compute(self, predictions, references):
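        """Compute all configured metrics, add prediction-count and length statistics,
        and, for known configurations, attach zero_scrolls_score and the display fields."""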
        metrics = {}
        for metric in self._metrics_to_compute:
            result = _compute_helper(
                deepcopy(predictions),
                deepcopy(references),
                **self._compute_helper_kwargs_fn[metric](),
            )
            metrics.update(
                {(f"{metric}/{key}" if key is not None else metric): value for key, value in result.items()}
            )
        metrics["num_predicted"] = len(predictions)
        prediction_lengths = [len(prediction) for prediction in predictions]
        metrics["mean_prediction_length_characters"] = sum(prediction_lengths) / len(prediction_lengths)

        metrics = {key: round(value, 4) for key, value in metrics.items()}

        if self.config_name in DATASET_TO_METRICS:
            zero_scrolls_score_key = DATASET_TO_METRICS[self.config_name]["zero_scrolls_score_key"]
            if zero_scrolls_score_key is not None:
                metrics["zero_scrolls_score"] = metrics[zero_scrolls_score_key]
            else:
                metrics["zero_scrolls_score"] = None

            display_keys = DATASET_TO_METRICS[self.config_name]["display_keys"]
            metrics["display_keys"] = display_keys
            metrics["display"] = []
            for display_key in display_keys:
                metrics["display"].append(metrics[display_key])

        return metrics


def _compute_helper(
        predictions,
        references,
        metric_fn,
        agg_fn,
        metric_fn_kwargs=None,
        transform_single_input_fn=None,
        transform_result_fn=None,
        transform_aggregated_result_fn=None,
        metric_returns_per_example=False,
):
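    """Apply metric_fn to predictions and references, aggregating over multiple references per example.

    metric_fn: callable taking (predictions, references, **metric_fn_kwargs).
    agg_fn: aggregation over the scores a prediction gets against its multiple references
        (e.g. max); None means metric_fn already performs this aggregation itself.
    metric_fn_kwargs: extra keyword arguments forwarded to metric_fn.
    transform_single_input_fn: optional preprocessing applied to every prediction and reference string.
    transform_result_fn: optional mapping of a raw metric output into a {key: value} dict.
    transform_aggregated_result_fn: optional post-processing of the final aggregated dict.
    metric_returns_per_example: whether metric_fn returns one result per example rather
        than a single aggregated result.
    """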
    if metric_fn_kwargs is None:
        metric_fn_kwargs = {}

    if agg_fn is None:
        assert metric_returns_per_example is False

    if transform_single_input_fn is not None:
        predictions = [transform_single_input_fn(prediction) for prediction in predictions]
        references = [
            [transform_single_input_fn(reference) for reference in reference_list] for reference_list in references
        ]

    if transform_result_fn is None:
        transform_result_fn = lambda x: x
        do_transform_result = False
    else:
        do_transform_result = True

    if transform_aggregated_result_fn is None:
        transform_aggregated_result_fn = lambda x: x

    if agg_fn is not None:
        # Required when the metric doesn't do the aggregation we need
        scores = defaultdict(list)
        if metric_returns_per_example is False:
            # If when given a list of prediction and references the metric returns an aggregated score,
            # we need to compute the metric for each prediction and reference and then aggregate the results.
            # This is only an issue when we want to get the best aggregated score (e.g. max) for prediction
            # with multiple references.
            for prediction, reference_list in zip(predictions, references):
                prediction_scores = defaultdict(list)
                for reference in reference_list:
                    result = transform_result_fn(metric_fn([prediction], [reference], **metric_fn_kwargs))
                    for key in result:
                        prediction_scores[key].append(result[key])
                for key in prediction_scores:
                    scores[key].append(agg_fn(prediction_scores[key]))
        else:
            # Flatten the references and then aggregate per prediction with agg_fn
            mapping = [[] for _ in range(len(predictions))]
            flattened_predictions = []
            flattened_references = []
            for i, prediction in enumerate(predictions):
                for reference in references[i]:
                    flattened_predictions.append(prediction)
                    flattened_references.append(reference)
                    mapping[i].append(len(flattened_references) - 1)

            results = metric_fn(flattened_predictions, flattened_references, **metric_fn_kwargs)
            if isinstance(results, dict):
                # Convert a dictionary with lists per key to a list with dictionary with the same keys per element
                results_list = [{k: None for k in results} for _ in range(len(flattened_predictions))]
                for k, v in results.items():
                    for i in range(len(v)):
                        results_list[i][k] = v[i]
            else:
                results_list = results

            if do_transform_result:
                for i in range(len(results_list)):
                    results_list[i] = transform_result_fn(results_list[i])

            for reference_indexes in mapping:
                prediction_scores = defaultdict(list)
                for reference_index in reference_indexes:
                    result = results_list[reference_index]
                    for key in result:
                        prediction_scores[key].append(result[key])
                for key in prediction_scores:
                    scores[key].append(agg_fn(prediction_scores[key]))

        return transform_aggregated_result_fn({key: sum(value) / len(value) for key, value in scores.items()})
    else:
        return transform_aggregated_result_fn(
            transform_result_fn(metric_fn(predictions, references, **metric_fn_kwargs))
        )