bowdbeg committed
Commit 71c1aec · 1 Parent(s): 3a2569c
Files changed (2):
  1. dicrect_compute_metric.py +1 -1
  2. matching_series.py +41 -3
dicrect_compute_metric.py CHANGED
@@ -3,7 +3,7 @@ from typing import Optional
 import evaluate
 
 
-class DirectComputeMetric(evaluate.Metric):
+class DirectComputeMetric(evaluate.EvaluationModule):
     """
     Base class for metrics that directly compute the score from the predictions and references without add_batch
     """
matching_series.py CHANGED
@@ -20,8 +20,6 @@ import datasets
 import evaluate
 import numpy as np
 
-from dicrect_compute_metric import DirectComputeMetric
-
 # TODO: Add BibTeX citation
 _CITATION = """\
 @InProceedings{huggingface:module,
@@ -58,7 +56,7 @@ Examples:
 
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
-class matching_series(DirectComputeMetric):
+class matching_series(evaluate.Metric):
     """TODO: Short description of my evaluation module."""
 
     def _info(self):
@@ -87,6 +85,46 @@ class matching_series(DirectComputeMetric):
         """Optional: download external resources useful to compute the scores"""
         pass
 
+    def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
+        """Compute the evaluation module.
+
+        Usage of positional arguments is not allowed to prevent mistakes.
+
+        Args:
+            predictions (`list/array/tensor`, *optional*):
+                Predictions.
+            references (`list/array/tensor`, *optional*):
+                References.
+            **kwargs (optional):
+                Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
+                method (see details in the docstring).
+
+        Return:
+            `dict` or `None`
+
+            - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
+            - `None` if the evaluation module is not run on the main process (`process_id != 0`).
+
+        ```py
+        >>> import evaluate
+        >>> accuracy = evaluate.load("accuracy")
+        >>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
+        ```
+        """
+        all_kwargs = {"predictions": predictions, "references": references, **kwargs}
+        if predictions is None and references is None:
+            missing_kwargs = {k: None for k in self._feature_names() if k not in all_kwargs}
+            all_kwargs.update(missing_kwargs)
+        else:
+            missing_inputs = [k for k in self._feature_names() if k not in all_kwargs]
+            if missing_inputs:
+                raise ValueError(
+                    f"Evaluation module inputs are missing: {missing_inputs}. All required inputs are {list(self._feature_names())}"
+                )
+        inputs = {input_name: all_kwargs[input_name] for input_name in self._feature_names()}
+        compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self._feature_names()}
+        return self._compute(**inputs, **compute_kwargs)
+
     def _compute(
         self,
         predictions: Union[list, np.ndarray],
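With this `compute` override, raw inputs reach `_compute` without passing through `add_batch`, so array shapes that the stock feature-casting path would reject (for example multi-dimensional series) can be forwarded as-is; extra keyword arguments not listed in `_feature_names()` are split off and passed to `_compute` as compute kwargs. A hedged usage sketch, assuming the module is published on the Hub (the repo id and array shapes below are assumptions, not confirmed by this commit):

```python
import evaluate
import numpy as np

# Hypothetical Hub id for this module; adjust to wherever it is hosted.
metric = evaluate.load("bowdbeg/matching_series")

# Illustrative (n_series, length, n_features) arrays; shapes are assumptions.
predictions = np.random.rand(8, 100, 3)
references = np.random.rand(8, 100, 3)

# Keyword-only call, as enforced by the override; the arrays are handed
# directly to _compute() instead of being serialized via add_batch.
results = metric.compute(predictions=predictions, references=references)
print(results)
```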