First commit
- app.py +5 -0
- gradio_tst.py +130 -0
- regression_evaluator.py +108 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,5 @@
+import evaluate
+from gradio_tst import launch_gradio_widget2
+
+module = evaluate.load("regression_evaluator.py")
+launch_gradio_widget2(module)
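
For reference, the loaded module can also be called directly, without the widget. A minimal sketch, assuming the Space's files are in the working directory; the input values are placeholders:

import evaluate

# Load the evaluator from the local script, exactly as app.py does.
module = evaluate.load("regression_evaluator.py")

# Toy inputs; any two equal-length lists of floats work.
results = module.compute(
    predictions=[2.5, 0.0, 2.0, 8.0],
    references=[3.0, -0.5, 2.0, 7.0],
)
print(results["mean_absolute_error"])  # 0.5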
gradio_tst.py
ADDED
@@ -0,0 +1,130 @@
+import json
+import os
+import re
+import sys
+from pathlib import Path
+
+import numpy as np
+from datasets import Value
+
+import logging
+
+
+
+REGEX_YAML_BLOCK = re.compile(r"---[\n\r]+([\S\s]*?)[\n\r]+---[\n\r]")
+
+
+def infer_gradio_input_types(feature_types):
+    """
+    Maps metric feature types to input types for gradio Dataframes:
+    - float/int -> numbers
+    - string -> strings
+    - any other -> json
+    Note that json is not a native gradio type but will be treated as a string
+    that is then parsed as json.
+    """
+    input_types = []
+    for feature_type in feature_types:
+        input_type = "json"
+        if isinstance(feature_type, Value):
+            if feature_type.dtype.startswith("int") or feature_type.dtype.startswith("float"):
+                input_type = "number"
+            elif feature_type.dtype == "string":
+                input_type = "str"
+        input_types.append(input_type)
+    return input_types
+
+
+def json_to_string_type(input_types):
+    """Maps json input type to str."""
+    return ["str" if i == "json" else i for i in input_types]
+
+
+def parse_readme(filepath):
+    """Parses a repository's README and strips the YAML metadata block."""
+    if not os.path.exists(filepath):
+        return "No README.md found."
+    with open(filepath, "r") as f:
+        text = f.read()
+        match = REGEX_YAML_BLOCK.search(text)
+        if match:
+            text = text[match.end() :]
+    return text
+
+
+def parse_gradio_data(data, input_types):
+    """Parses data from gradio Dataframe for use in metric."""
+    metric_inputs = {}
+    data.replace("", np.nan, inplace=True)
+    data.dropna(inplace=True)
+    for feature_name, input_type in zip(data, input_types):
+        if input_type == "json":
+            metric_inputs[feature_name] = [json.loads(d) for d in data[feature_name].to_list()]
+        elif input_type == "str":
+            metric_inputs[feature_name] = [d.strip('"') for d in data[feature_name].to_list()]
+        else:
+            metric_inputs[feature_name] = data[feature_name]
+    return metric_inputs
+
+
+def parse_test_cases(test_cases, feature_names, input_types):
+    """
+    Parses test cases to be used in gradio Dataframe. Note that quotation marks
+    are added to strings to follow the JSON format.
+    """
+    if len(test_cases) == 0:
+        return None
+    examples = []
+    for test_case in test_cases:
+        parsed_cases = []
+        for feat, input_type in zip(feature_names, input_types):
+            if input_type == "json":
+                parsed_cases.append([str(element) for element in test_case[feat]])
+            elif input_type == "str":
+                parsed_cases.append(['"' + element + '"' for element in test_case[feat]])
+            else:
+                parsed_cases.append(test_case[feat])
+        examples.append([list(i) for i in zip(*parsed_cases)])
+    return examples
+
+
+def launch_gradio_widget2(metric):
+    """Launches `metric` widget with Gradio."""
+
+    try:
+        import gradio as gr
+    except ImportError as error:
+        logging.error("To create a metric widget with Gradio make sure gradio is installed.")
+        raise error
+
+    local_path = Path(sys.path[0])
+    # if there are several input types, use first as default.
+    if isinstance(metric.features, list):
+        (feature_names, feature_types) = zip(*metric.features[0].items())
+    else:
+        (feature_names, feature_types) = zip(*metric.features.items())
+    gradio_input_types = infer_gradio_input_types(feature_types)
+
+    def compute(data):
+        return metric.compute(**parse_gradio_data(data, gradio_input_types))
+
+    iface = gr.Interface(
+        fn=compute,
+        inputs=gr.Dataframe(
+            headers=feature_names,
+            col_count=len(feature_names),
+            row_count=1,
+            datatype=json_to_string_type(gradio_input_types),
+        ),
+        outputs=gr.Textbox(label=metric.name),
+        description=(
+            metric.info.description + "\nIf this is a text-based metric, make sure to wrap your input in double quotes."
+            " Alternatively you can use a JSON-formatted list as input."
+        ),
+        title=f"Metric: {metric.name}",
+        article=parse_readme(local_path / "README.md"),
+        # TODO: load test cases and use them to populate examples
+        # examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)]
+    )
+
+    iface.launch(share=True)
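
To illustrate how the helpers above fit together, here is a minimal sketch (toy values; assumes pandas is installed, which gradio already requires) of the type inference and Dataframe parsing for the two float features used by regression_evaluator.py:

import pandas as pd
from datasets import Value

from gradio_tst import infer_gradio_input_types, json_to_string_type, parse_gradio_data

# Both features are floats, so they map to the "number" input type.
input_types = infer_gradio_input_types([Value("float"), Value("float")])
print(input_types)                       # ['number', 'number']
print(json_to_string_type(input_types))  # unchanged here; only 'json' maps to 'str'

# gradio hands the widget contents to `compute` as a pandas DataFrame;
# empty cells arrive as "" and are dropped by parse_gradio_data.
data = pd.DataFrame({"predictions": [2.5, 0.0, ""], "references": [3.0, -0.5, ""]})
metric_inputs = parse_gradio_data(data, input_types)
print(metric_inputs["predictions"].to_list())  # [2.5, 0.0]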
regression_evaluator.py
ADDED
@@ -0,0 +1,108 @@
+import evaluate
+from datasets import Features, Value
+from scipy.stats import kendalltau, pearsonr, spearmanr
+from sklearn.metrics import (
+    max_error,
+    mean_absolute_error,
+    mean_absolute_percentage_error,
+    mean_squared_error,
+    r2_score,
+)
+
+_CITATION = """
+@article{scikit-learn,
+  title={Scikit-learn: Machine Learning in {P}ython},
+  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
+          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
+          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
+          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
+  journal={Journal of Machine Learning Research},
+  volume={12},
+  pages={2825--2830},
+  year={2011}
+}
+
+@article{2020SciPy-NMeth,
+  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
+             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
+             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
+             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
+             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
+             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
+             Kern, Robert and Larson, Eric and Carey, C J and
+             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
+             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
+             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
+             Harris, Charles R. and Archibald, Anne M. and
+             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
+             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
+  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
+             Computing in Python}},
+  journal = {Nature Methods},
+  year    = {2020},
+  volume  = {17},
+  pages   = {261--272},
+  adsurl  = {https://rdcu.be/b08Wh},
+  doi     = {10.1038/s41592-019-0686-2},
+}
+"""
+
+
+_DESCRIPTION = """
+This evaluator computes multiple regression metrics to assess the performance of a model. Metrics calculated include: mean absolute error (MAE),
+mean absolute percentage error (MAPE), mean squared error (MSE), R-squared (R2), max error (ME), and the Pearson, Spearman, and Kendall Tau correlation measures.
+"""
+
+_KWARGS_DESCRIPTION = """
+Args:
+    predictions (`list` of `float`): Predicted values.
+    references (`list` of `float`): Ground truth values.
+Returns:
+    a dict containing:
+    mean_absolute_error (float): https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html
+    mean_absolute_percentage_error (float): https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_percentage_error.html
+    mean_squared_error (float): https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html
+    r2_score (float): https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html
+    max_error (float): https://scikit-learn.org/stable/modules/generated/sklearn.metrics.max_error.html
+    pearsonr (Tuple[float, float]): the first value being the score and the second one the p-value
+        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html)
+    spearmanr (Tuple[float, float]): the first value being the score and the second one the p-value
+        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html)
+    kendalltau (Tuple[float, float]): the first value being the score and the second one the p-value
+        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kendalltau.html)
+"""
+
+
+class RegressionEvaluator(evaluate.Metric):
+    def _info(self):
+        return evaluate.MetricInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            features=Features(
+                {"predictions": Value("float"), "references": Value("float")}
+            ),
+        )
+
+    def _compute(self, predictions, references):
+        error_fns = [
+            mean_absolute_error,
+            mean_absolute_percentage_error,
+            mean_squared_error,
+            max_error,
+            r2_score,
+        ]
+        correlation_fns = [pearsonr, spearmanr, kendalltau]
+        results = {}
+
+        # Compute error functions; sklearn expects (y_true, y_pred).
+        for fn in error_fns:
+            results[fn.__name__] = fn(references, predictions)
+
+        # Compute correlation measures with p-values, keyed by function name.
+        for fn in correlation_fns:
+            output = fn(references, predictions)
+            score, p_value = output.statistic, output.pvalue
+            results[fn.__name__] = (float(score), float(p_value))
+
+        return results
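
Note that _compute unpacks output.statistic and output.pvalue, so it relies on the result objects that scipy's correlation functions return as of scipy 1.9; older releases returned plain tuples without those attributes. A minimal sketch of that unpacking, with placeholder values:

from scipy.stats import pearsonr

references = [3.0, -0.5, 2.0, 7.0]
predictions = [2.5, 0.0, 2.0, 8.0]

# scipy >= 1.9: the result object exposes .statistic and .pvalue,
# which _compute converts into a (score, p_value) tuple.
output = pearsonr(references, predictions)
print(float(output.statistic), float(output.pvalue))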
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+evaluate
+datasets
+scikit-learn
+scipy
+gradio
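
With these dependencies installed (pip install -r requirements.txt), running python app.py should reproduce the Space locally; note that gradio_tst.py calls launch(share=True), which also attempts to create a public share link.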