Spaces:
Runtime error
Runtime error
meg-huggingface
committed on
Commit
·
1111e1c
1
Parent(s):
ee330de
Fiddling with util display
Browse files — src/display/utils.py (+15 −14)
src/display/utils.py
CHANGED
|
@@ -20,28 +20,29 @@ class ColumnContent:
|
|
| 20 |
hidden: bool = False
|
| 21 |
never_hidden: bool = False
|
| 22 |
dummy: bool = False
|
|
|
|
| 23 |
|
| 24 |
## Leaderboard columns
|
| 25 |
auto_eval_column_dict = []
|
| 26 |
# Init
|
| 27 |
-
auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
|
| 28 |
-
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
|
| 29 |
#Scores
|
| 30 |
-
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
|
| 31 |
for task in Tasks:
|
| 32 |
-
auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
|
| 33 |
# Model information
|
| 34 |
-
auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False,
|
| 35 |
-
auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False,
|
| 36 |
-
auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False,
|
| 37 |
-
auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False,
|
| 38 |
-
auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False,
|
| 39 |
-
auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False,
|
| 40 |
-
auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False,
|
| 41 |
-
auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False,
|
| 42 |
-
auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False,
|
| 43 |
# Dummy column for the search bar (hidden by the custom CSS)
|
| 44 |
-
auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
|
| 45 |
|
| 46 |
# We use make dataclass to dynamically fill the scores from Tasks
|
| 47 |
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
|
|
|
|
| 20 |
hidden: bool = False
|
| 21 |
never_hidden: bool = False
|
| 22 |
dummy: bool = False
|
| 23 |
+
advanced: bool = False
|
| 24 |
|
| 25 |
## Leaderboard columns
|
| 26 |
auto_eval_column_dict = []
|
| 27 |
# Init
|
| 28 |
+
auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent(name="T", type="str", displayed_by_default=True, never_hidden=True)])
|
| 29 |
+
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent(name="Model", type="markdown", displayed_by_default=True, never_hidden=True)])
|
| 30 |
#Scores
|
| 31 |
+
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent(name="Average ⬆️", type="number", displayed_by_default=True)])
|
| 32 |
for task in Tasks:
|
| 33 |
+
auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(name=task.value.col_name, type="number", displayed_by_default=True)])
|
| 34 |
# Model information
|
| 35 |
+
auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent(name="Type", type="str", displayed_by_default=False, advanced=True)])
|
| 36 |
+
auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", displayed_by_default=False, advanced=True)])
|
| 37 |
+
auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", displayed_by_default=False, advanced=True)])
|
| 38 |
+
auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", displayed_by_default=False, advanced=True)])
|
| 39 |
+
auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", displayed_by_default=False, advanced=True)])
|
| 40 |
+
auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", displayed_by_default=False, advanced=True)])
|
| 41 |
+
auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", displayed_by_default=False, advanced=True)])
|
| 42 |
+
auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", displayed_by_default=False, advanced=True)])
|
| 43 |
+
auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", displayed_by_default=False, advanced=True)])
|
| 44 |
# Dummy column for the search bar (hidden by the custom CSS)
|
| 45 |
+
auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", displayed_by_default=False, dummy=True)])
|
| 46 |
|
| 47 |
# We use make dataclass to dynamically fill the scores from Tasks
|
| 48 |
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
|