eladsegal committed
Commit 97b4774 · Parent: 8ea0404

Update metrics/bleu.py

Files changed (1):
  metrics/bleu.py  +38 -121
metrics/bleu.py CHANGED
@@ -1,122 +1,39 @@
-# Copied from https://github.com/tensorflow/nmt/blob/0be864257a76c151eef20ea689755f08bc1faf4e/nmt/scripts/bleu.py
-
-# Copyright 2017 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Python implementation of BLEU and smooth-BLEU.
-
-This module provides a Python implementation of BLEU and smooth-BLEU.
-Smooth BLEU is computed following the method outlined in the paper:
-Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
-evaluation metrics for machine translation. COLING 2004.
-"""
-
-import collections
-import math
-
-
-def _get_ngrams(segment, max_order):
-    """Extracts all n-grams upto a given maximum order from an input segment.
-
-    Args:
-      segment: text segment from which n-grams will be extracted.
-      max_order: maximum length in tokens of the n-grams returned by this
-          methods.
-
-    Returns:
-      The Counter containing all n-grams upto max_order in segment
-      with a count of how many times each n-gram occurred.
-    """
-    ngram_counts = collections.Counter()
-    for order in range(1, max_order + 1):
-        for i in range(0, len(segment) - order + 1):
-            ngram = tuple(segment[i : i + order])
-            ngram_counts[ngram] += 1
-    return ngram_counts
-
-
-def compute_bleu(translation_corpus, reference_corpus, max_order=4, smooth=False):
-    """Computes BLEU score of translated segments against one or more references.
-
-    Args:
-      translation_corpus: list of translations to score. Each translation
-        should be tokenized into a list of tokens.
-      reference_corpus: list of lists of references for each translation. Each
-        reference should be tokenized into a list of tokens.
-      max_order: Maximum n-gram order to use when computing BLEU score.
-      smooth: Whether or not to apply Lin et al. 2004 smoothing.
-
-    Returns:
-      3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
-      precisions and brevity penalty.
-    """
-    matches_by_order = [0] * max_order
-    possible_matches_by_order = [0] * max_order
-    reference_length = 0
-    translation_length = 0
-    for (references, translation) in zip(reference_corpus, translation_corpus):
-        reference_length += min(len(r) for r in references)
-        translation_length += len(translation)
-
-        merged_ref_ngram_counts = collections.Counter()
-        for reference in references:
-            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
-        translation_ngram_counts = _get_ngrams(translation, max_order)
-        overlap = translation_ngram_counts & merged_ref_ngram_counts
-        for ngram in overlap:
-            matches_by_order[len(ngram) - 1] += overlap[ngram]
-        for order in range(1, max_order + 1):
-            possible_matches = len(translation) - order + 1
-            if possible_matches > 0:
-                possible_matches_by_order[order - 1] += possible_matches
-
-    precisions = [0] * max_order
-    for i in range(0, max_order):
-        if smooth:
-            precisions[i] = (matches_by_order[i] + 1.0) / (possible_matches_by_order[i] + 1.0)
-        else:
-            if possible_matches_by_order[i] > 0:
-                precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
-            else:
-                precisions[i] = 0.0
-
-    if min(precisions) > 0:
-        p_log_sum = sum((1.0 / max_order) * math.log(p) for p in precisions)
-        geo_mean = math.exp(p_log_sum)
-    else:
-        geo_mean = 0
-
-    ratio = float(translation_length) / reference_length
-
-    if ratio > 1.0:
-        bp = 1.0
-    else:
-        bp = math.exp(1 - 1.0 / ratio)
-
-    bleu = geo_mean * bp
-
-    return {
-        "bleu": bleu,
-        **{f"precision-{i+1}": round(p, 4) for i, p in enumerate(precisions)},
-        "brevity_penalty": bp,
-        "length_ratio": ratio,
-        "translation_length": translation_length,
-        "reference_length": reference_length,
+# Copied from https://github.com/huggingface/datasets/blob/76bb45964df1e62d1411b0a9e9fc673e9a791c9a/metrics/sacrebleu/sacrebleu.py
+
+import sacrebleu as scb
+
+
+def compute_bleu(
+    predictions,
+    references,
+    smooth_method="exp",
+    smooth_value=None,
+    force=False,
+    lowercase=False,
+    tokenize=None,
+    use_effective_order=False,
+):
+    references_per_prediction = len(references[0])
+    if any(len(refs) != references_per_prediction for refs in references):
+        raise ValueError("Sacrebleu requires the same number of references for each prediction")
+    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
+    output = scb.corpus_bleu(
+        predictions,
+        transformed_references,
+        smooth_method=smooth_method,
+        smooth_value=smooth_value,
+        force=force,
+        lowercase=lowercase,
+        use_effective_order=use_effective_order,
+        **(dict(tokenize=tokenize) if tokenize else {}),
+    )
+    output_dict = {
+        "score": output.score,
+        "counts": output.counts,
+        "totals": output.totals,
+        "precisions": output.precisions,
+        "bp": output.bp,
+        "sys_len": output.sys_len,
+        "ref_len": output.ref_len,
     }
-
-
-def postprocess_text(text):
-    # TODO: Tokenize properly
-    return text.split()
+    return output_dict
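
For reference, here is a minimal usage sketch of the new sacrebleu-backed compute_bleu. It is not part of the commit; it assumes sacrebleu is installed (pip install sacrebleu) and that the file is importable as metrics.bleu:

from metrics.bleu import compute_bleu

predictions = ["the cat sat on the mat"]
# One list of references per prediction; compute_bleu raises ValueError
# unless every prediction has the same number of references.
references = [["the cat sat on the mat", "a cat sat on a mat"]]

result = compute_bleu(predictions=predictions, references=references)
print(result["score"])       # corpus BLEU on sacrebleu's 0-100 scale
print(result["precisions"])  # n-gram precisions for orders 1-4

Internally, the per-prediction reference lists are transposed into per-position reference streams ([["the cat sat on the mat"], ["a cat sat on a mat"]] here), since scb.corpus_bleu expects one stream per reference position. Note also that the return schema changes with this commit: the old function returned a dict keyed by "bleu" (0-1 scale) with per-order "precision-{n}" entries, while the new one returns sacrebleu's fields, with "score" on a 0-100 scale.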