Konstantin Chernyshev commited on
Commit
ff4f460
·
1 Parent(s): 89f5ccb

feat: add mvp leaderboard

Browse files
app.py CHANGED
@@ -1,204 +1,208 @@
 
 
1
  import gradio as gr
2
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
3
  import pandas as pd
4
  from apscheduler.schedulers.background import BackgroundScheduler
5
- from huggingface_hub import snapshot_download
6
-
7
- from src.about import (
8
- CITATION_BUTTON_LABEL,
9
- CITATION_BUTTON_TEXT,
10
- EVALUATION_QUEUE_TEXT,
11
- INTRODUCTION_TEXT,
12
- LLM_BENCHMARKS_TEXT,
13
- TITLE,
14
- )
15
- from src.display.css_html_js import custom_css
16
- from src.display.utils import (
17
- BENCHMARK_COLS,
18
- COLS,
19
- EVAL_COLS,
20
- EVAL_TYPES,
21
- AutoEvalColumn,
22
- ModelType,
23
- fields,
24
- WeightType,
25
- Precision
26
  )
27
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
28
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
29
- from src.submission.submit import add_new_eval
30
 
31
 
32
  def restart_space():
 
 
 
33
  API.restart_space(repo_id=REPO_ID)
34
 
35
- ### Space initialisation
36
- try:
37
- print(EVAL_REQUESTS_PATH)
38
- snapshot_download(
39
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
40
- )
41
- except Exception:
42
- restart_space()
43
- try:
44
- print(EVAL_RESULTS_PATH)
45
- snapshot_download(
46
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
47
- )
48
- except Exception:
49
- restart_space()
50
-
51
-
52
- LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
53
-
54
- (
55
- finished_eval_queue_df,
56
- running_eval_queue_df,
57
- pending_eval_queue_df,
58
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
59
-
60
- def init_leaderboard(dataframe):
61
  if dataframe is None or dataframe.empty:
62
  raise ValueError("Leaderboard DataFrame is empty or None.")
63
- return Leaderboard(
64
- value=dataframe,
65
- datatype=[c.type for c in fields(AutoEvalColumn)],
66
- select_columns=SelectColumns(
67
- default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
68
- cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
69
- label="Select Columns to Display:",
70
- ),
71
- search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
72
- hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
73
- filter_columns=[
74
- ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
75
- ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
76
- ColumnFilter(
77
- AutoEvalColumn.params.name,
78
- type="slider",
79
- min=0.01,
80
- max=150,
81
- label="Select the number of parameters (B)",
82
- ),
83
- ColumnFilter(
84
- AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
85
- ),
86
- ],
87
- bool_checkboxgroup_label="Hide models",
88
- interactive=False,
89
- )
90
-
91
-
92
- demo = gr.Blocks(css=custom_css)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  with demo:
94
  gr.HTML(TITLE)
95
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
96
 
 
 
 
97
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
98
- with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
99
- leaderboard = init_leaderboard(LEADERBOARD_DF)
100
 
101
- with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
102
- gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
103
 
104
- with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
105
- with gr.Column():
106
- with gr.Row():
107
- gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
108
-
109
- with gr.Column():
110
- with gr.Accordion(
111
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
112
- open=False,
113
- ):
114
- with gr.Row():
115
- finished_eval_table = gr.components.Dataframe(
116
- value=finished_eval_queue_df,
117
- headers=EVAL_COLS,
118
- datatype=EVAL_TYPES,
119
- row_count=5,
120
- )
121
- with gr.Accordion(
122
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
123
- open=False,
124
- ):
125
- with gr.Row():
126
- running_eval_table = gr.components.Dataframe(
127
- value=running_eval_queue_df,
128
- headers=EVAL_COLS,
129
- datatype=EVAL_TYPES,
130
- row_count=5,
131
- )
132
-
133
- with gr.Accordion(
134
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
135
- open=False,
136
- ):
137
- with gr.Row():
138
- pending_eval_table = gr.components.Dataframe(
139
- value=pending_eval_queue_df,
140
- headers=EVAL_COLS,
141
- datatype=EVAL_TYPES,
142
- row_count=5,
143
- )
144
- with gr.Row():
145
- gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
146
-
147
- with gr.Row():
148
- with gr.Column():
149
- model_name_textbox = gr.Textbox(label="Model name")
150
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
151
- model_type = gr.Dropdown(
152
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
153
- label="Model type",
154
- multiselect=False,
155
- value=None,
156
- interactive=True,
157
- )
158
-
159
- with gr.Column():
160
- precision = gr.Dropdown(
161
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
162
- label="Precision",
163
- multiselect=False,
164
- value="float16",
165
- interactive=True,
166
- )
167
- weight_type = gr.Dropdown(
168
- choices=[i.value.name for i in WeightType],
169
- label="Weights type",
170
- multiselect=False,
171
- value="Original",
172
- interactive=True,
173
- )
174
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
175
-
176
- submit_button = gr.Button("Submit Eval")
177
- submission_result = gr.Markdown()
178
- submit_button.click(
179
- add_new_eval,
180
- [
181
- model_name_textbox,
182
- base_model_name_textbox,
183
- revision_name_textbox,
184
- precision,
185
- weight_type,
186
- model_type,
187
- ],
188
- submission_result,
189
- )
190
 
191
- with gr.Row():
192
- with gr.Accordion("📙 Citation", open=False):
193
- citation_button = gr.Textbox(
194
- value=CITATION_BUTTON_TEXT,
195
- label=CITATION_BUTTON_LABEL,
196
- lines=20,
197
- elem_id="citation-button",
198
- show_copy_button=True,
199
- )
200
 
201
  scheduler = BackgroundScheduler()
202
- scheduler.add_job(restart_space, "interval", seconds=1800)
203
  scheduler.start()
204
- demo.queue(default_concurrency_limit=40).launch()
 
1
+ import os
2
+
3
  import gradio as gr
 
4
  import pandas as pd
5
  from apscheduler.schedulers.background import BackgroundScheduler
6
+ from huggingface_hub import HfApi
7
+
8
+ from src.about import CITATION_TEXT, INTRODUCTION_TEXT, LLM_BENCHMARKS_TEXT, TITLE
9
+ from src.populate import (
10
+ MU_MATH_COLUMNS_DICT,
11
+ U_MATH_COLUMNS_DICT,
12
+ Field,
13
+ get_mu_math_leaderboard_df,
14
+ get_u_math_leaderboard_df,
 
 
 
 
 
 
 
 
 
 
 
 
15
  )
 
 
 
16
 
17
 
18
  def restart_space():
19
+ TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
20
+ API = HfApi(token=TOKEN)
21
+ REPO_ID = "toloka/u-math-leaderboard"
22
  API.restart_space(repo_id=REPO_ID)
23
 
24
+
25
+ LEADERBOARD_U_MATH_DF = get_u_math_leaderboard_df()
26
+ LEADERBOARD_MU_MATH_DF = get_mu_math_leaderboard_df()
27
+
28
+
29
+ def init_leaderboard(dataframe: pd.DataFrame, columns_dict: dict[str, Field]) -> gr.components.Component:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  if dataframe is None or dataframe.empty:
31
  raise ValueError("Leaderboard DataFrame is empty or None.")
32
+
33
+ def filter_dataframe_by_selected_columns(full_df: pd.DataFrame, columns: list[str]) -> pd.DataFrame:
34
+ always_here_cols = [c.pretty_name for c in columns_dict.values() if c.never_hidden]
35
+ selected_columns = [c for c in columns if c in full_df.columns and c not in always_here_cols]
36
+ # keep the order of the columns
37
+ filtered_df = full_df[[c for c in full_df.columns if c in (always_here_cols + selected_columns)]]
38
+ return filtered_df
39
+
40
+ def filter_dataframe_by_selected_tag_columns(
41
+ full_df: pd.DataFrame, current_tag: str
42
+ ) -> tuple[pd.DataFrame, list[str], str, str]:
43
+ always_here_cols = [c.pretty_name for c in columns_dict.values() if c.never_hidden]
44
+ selected_columns = [
45
+ c.pretty_name for c in columns_dict.values() if current_tag in c.tags and c not in always_here_cols
46
+ ]
47
+ # keep the order of the columns
48
+ filtered_df = full_df[[c for c in full_df.columns if c in (always_here_cols + selected_columns)]]
49
+ _columns_to_select_visibility = [
50
+ c.pretty_name for c in columns_dict.values() if not c.fully_hidden and not c.never_hidden
51
+ ]
52
+ return filtered_df, [c for c in _columns_to_select_visibility if c in filtered_df.columns], "All", "All"
53
+
54
+ def filter_dataframe_by_search(full_df: pd.DataFrame, current_df: pd.DataFrame, search: str) -> pd.DataFrame:
55
+ filtered_df = full_df[
56
+ (full_df[columns_dict["model_name"].pretty_name].str.contains(search, case=False, na=False))
57
+ ]
58
+ return filtered_df[current_df.columns]
59
+
60
+ def filter_dataframe_by_model_type(
61
+ full_df: pd.DataFrame, current_df: pd.DataFrame, filter_name: str
62
+ ) -> pd.DataFrame:
63
+ if filter_name == "All":
64
+ return full_df[current_df.columns]
65
+ else:
66
+ # actually filter by emoji
67
+ query_symbol = filter_name[0]
68
+ filtered_df = full_df[full_df[columns_dict["model_type_symbol"].pretty_name] == query_symbol]
69
+ return filtered_df[current_df.columns]
70
+
71
+ def filter_dataframe_by_model_size(
72
+ full_df: pd.DataFrame, current_df: pd.DataFrame, filter_name: str
73
+ ) -> pd.DataFrame:
74
+ if filter_name == "All":
75
+ return full_df[current_df.columns]
76
+ else:
77
+ # actually filter by emoji
78
+ query_symbol = filter_name[0]
79
+ filtered_df = full_df[full_df[columns_dict["model_size_symbol"].pretty_name] == query_symbol]
80
+ return filtered_df[current_df.columns]
81
+
82
+ with gr.Column() as col:
83
+ # Add the controls
84
+ with gr.Accordion("➡️ See All Columns", open=False):
85
+ columns_to_select_visibility = [
86
+ c.pretty_name for c in columns_dict.values() if not c.fully_hidden and not c.never_hidden
87
+ ]
88
+ all_columns_selector = gr.CheckboxGroup(
89
+ choices=columns_to_select_visibility,
90
+ value=[
91
+ c.pretty_name
92
+ for c in columns_dict.values()
93
+ if c.pretty_name in columns_to_select_visibility and c.displayed_by_default
94
+ ],
95
+ label="Select Columns to Display:",
96
+ interactive=True,
97
+ container=False,
98
+ )
99
+ with gr.Row():
100
+ with gr.Column():
101
+ search_bar = gr.Textbox(
102
+ placeholder="🔍 Search for your model and press ENTER...",
103
+ show_label=False,
104
+ elem_id="search-bar",
105
+ )
106
+
107
+ # collect all column tags and create buttons for them
108
+ all_tags = {}
109
+ with gr.Column(variant="panel"):
110
+ gr.Markdown("Select Columns:")
111
+ for c in columns_dict.values():
112
+ for tag in c.tags:
113
+ if tag not in all_tags:
114
+ all_tags[tag] = gr.Button(tag, interactive=True, size="sm")
115
+
116
+ model_type_filter_selector = gr.Radio(
117
+ label="Filter model types:",
118
+ choices=["All", "💙 Open-Weights", "🟥 Proprietary"],
119
+ value="All",
120
+ elem_id="model-type-filter",
121
+ interactive=True,
122
+ )
123
+
124
+ model_size_filter_selector = gr.Radio(
125
+ label="Filter model sizes:",
126
+ choices=["All", "🛴 Tiny (<5B)", "🚗 Small (5-50B)", "🚚 Medium (50-100B)", "🚀 Large (>100B)"],
127
+ value="All",
128
+ elem_id="model-size-filter",
129
+ interactive=True,
130
+ )
131
+
132
+ # create the hidden and visible dataframes to display
133
+ hidden_leaderboard_df = gr.components.Dataframe(
134
+ value=dataframe,
135
+ datatype=[c.column_type for c in columns_dict.values()],
136
+ visible=False,
137
+ interactive=False,
138
+ )
139
+ leaderboard_df = gr.components.Dataframe(
140
+ value=dataframe[[c.pretty_name for c in columns_dict.values() if c.displayed_by_default]],
141
+ datatype=[c.column_type for c in columns_dict.values()],
142
+ elem_id="leaderboard-df",
143
+ interactive=False,
144
+ )
145
+ # add the callbacks
146
+ all_columns_selector.change(
147
+ fn=filter_dataframe_by_selected_columns,
148
+ inputs=[hidden_leaderboard_df, all_columns_selector],
149
+ outputs=[leaderboard_df],
150
+ )
151
+ search_bar.submit(
152
+ fn=filter_dataframe_by_search,
153
+ inputs=[hidden_leaderboard_df, leaderboard_df, search_bar],
154
+ outputs=[leaderboard_df],
155
+ )
156
+ model_type_filter_selector.change(
157
+ fn=filter_dataframe_by_model_type,
158
+ inputs=[hidden_leaderboard_df, leaderboard_df, model_type_filter_selector],
159
+ outputs=[leaderboard_df],
160
+ )
161
+ model_size_filter_selector.change(
162
+ fn=filter_dataframe_by_model_size,
163
+ inputs=[hidden_leaderboard_df, leaderboard_df, model_size_filter_selector],
164
+ outputs=[leaderboard_df],
165
+ )
166
+ for tag, button in all_tags.items():
167
+ button.click(
168
+ fn=filter_dataframe_by_selected_tag_columns,
169
+ inputs=[hidden_leaderboard_df, button],
170
+ outputs=[leaderboard_df, all_columns_selector, model_type_filter_selector, model_size_filter_selector],
171
+ )
172
+
173
+ # reload the leaderboard on the first load
174
+ filter_dataframe_by_selected_columns(dataframe, all_columns_selector.value)
175
+ return col
176
+
177
+
178
+ demo = gr.Blocks()
179
  with demo:
180
  gr.HTML(TITLE)
181
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
182
 
183
+ print(LEADERBOARD_U_MATH_DF)
184
+ print(LEADERBOARD_MU_MATH_DF)
185
+
186
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
187
+ with gr.TabItem("🏆 U-MATH", elem_id="u-math-benchmark-tab-table", id=0):
188
+ leaderboard_umath = init_leaderboard(LEADERBOARD_U_MATH_DF, U_MATH_COLUMNS_DICT)
189
 
190
+ with gr.TabItem("🏅 μ-MATH (Meta-Benchmark)", elem_id="mu-math-benchmark-tab-table", id=1):
191
+ leaderboard_mumath = init_leaderboard(LEADERBOARD_MU_MATH_DF, MU_MATH_COLUMNS_DICT)
192
 
193
+ with gr.TabItem("📝 About", elem_id="about-tab-table", id=2):
194
+ gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
195
 
196
+ citation_button = gr.Textbox(
197
+ value=CITATION_TEXT,
198
+ label="📙 Citation",
199
+ lines=9,
200
+ elem_id="citation-button",
201
+ show_copy_button=True,
202
+ container=True,
203
+ )
 
204
 
205
  scheduler = BackgroundScheduler()
206
+ scheduler.add_job(restart_space, "interval", seconds=60 * 60)
207
  scheduler.start()
208
+ demo.queue(default_concurrency_limit=40).launch()
data/mu_math_eval_results.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "model_name": "mistralai/Ministral-8B-Instruct-2410",
4
+ "extract_model_name": "Qwen/Qwen2.5-72B-Instruct",
5
+ "mu_math": [0.664, 0.33, 0.651, 0.68, 0.701, 0.628],
6
+ "GPT-4o": [0.664, 0.332, 0.621, 0.71, 0.696, 0.637],
7
+ "Gemini-1.5-Pro": [0.672, 0.279, 0.709, 0.585, 0.798, 0.466],
8
+ "Llama-3.1-70B-Instruct": [0.675, 0.317, 0.619, 0.707, 0.541, 0.769],
9
+ "Qwen2.5-72B-Instruct": [0.646, 0.295, 0.626, 0.672, 0.719, 0.574]
10
+ },
11
+ {
12
+ "model_name": "meta-llama/Llama-3.3-70B-Instruct",
13
+ "extract_model_name": "Qwen/Qwen2.5-72B-Instruct",
14
+ "mu_math": [0.741, 0.496, 0.666, 0.827, 0.816, 0.682],
15
+ "GPT-4o": [0.731, 0.475, 0.636, 0.832, 0.802, 0.681],
16
+ "Gemini-1.5-Pro": [0.705, 0.394, 0.693, 0.732, 0.856, 0.508],
17
+ "Llama-3.1-70B-Instruct": [0.823, 0.605, 0.67, 0.908, 0.802, 0.832],
18
+ "Qwen2.5-72B-Instruct": [0.705, 0.421, 0.658, 0.767, 0.791, 0.627]
19
+ }
20
+ ]
data/u_math_eval_results.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "model_name": "gpt-4o-mini-2024-07-18",
4
+ "judge_model_name": "gpt-4o-mini-2024-07-18",
5
+ "u_math": [0.5123, 0.2345, 0.1234],
6
+ "differential_calc": [0.5123, 0.2345, 0.1234],
7
+ "integral_calc": [0.43, 0.23, 0.34],
8
+ "algebra": [0.98, 0.12, 0.34],
9
+ "multivariable_calculus": [0.98, 0.12, 0.34],
10
+ "precalculus_review": [0.8412, 0.1234, 0.1234],
11
+ "sequences_series": [0.1234, 0.1234, 0.1234]
12
+ }
13
+ ]
pyproject.toml CHANGED
@@ -1,8 +1,10 @@
1
  [tool.ruff]
 
 
 
2
  # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
3
  select = ["E", "F"]
4
  ignore = ["E501"] # line too long (black is taking care of this)
5
- line-length = 119
6
  fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
7
 
8
  [tool.isort]
 
1
  [tool.ruff]
2
+ line-length = 119
3
+
4
+ [tool.ruff.lint]
5
  # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
6
  select = ["E", "F"]
7
  ignore = ["E501"] # line too long (black is taking care of this)
 
8
  fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
9
 
10
  [tool.isort]
requirements.txt CHANGED
@@ -1,9 +1,9 @@
1
  APScheduler
2
  black
 
3
  datasets
4
  gradio
5
  gradio[oauth]
6
- gradio_leaderboard==0.0.13
7
  gradio_client
8
  huggingface-hub>=0.18.0
9
  matplotlib
@@ -13,4 +13,4 @@ python-dateutil
13
  tqdm
14
  transformers
15
  tokenizers>=0.15.0
16
- sentencepiece
 
1
  APScheduler
2
  black
3
+ isort
4
  datasets
5
  gradio
6
  gradio[oauth]
 
7
  gradio_client
8
  huggingface-hub>=0.18.0
9
  matplotlib
 
13
  tqdm
14
  transformers
15
  tokenizers>=0.15.0
16
+ sentencepiece
src/about.py CHANGED
@@ -1,6 +1,7 @@
1
  from dataclasses import dataclass
2
  from enum import Enum
3
 
 
4
  @dataclass
5
  class Task:
6
  benchmark: str
@@ -11,17 +12,16 @@ class Task:
11
  # Select your tasks here
12
  # ---------------------------------------------------
13
  class Tasks(Enum):
14
- # task_key in the json file, metric_key in the json file, name to display in the leaderboard
15
  task0 = Task("anli_r1", "acc", "ANLI")
16
  task1 = Task("logiqa", "acc_norm", "LogiQA")
17
 
18
- NUM_FEWSHOT = 0 # Change with your few shot
19
- # ---------------------------------------------------
20
 
 
21
 
22
 
23
  # Your leaderboard name
24
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
25
 
26
  # What does your leaderboard evaluate?
27
  INTRODUCTION_TEXT = """
@@ -29,7 +29,7 @@ Intro text
29
  """
30
 
31
  # Which evaluations are you running? how can people reproduce what you have?
32
- LLM_BENCHMARKS_TEXT = f"""
33
  ## How it works
34
 
35
  ## Reproducibility
@@ -37,36 +37,12 @@ To reproduce our results, here is the commands you can run:
37
 
38
  """
39
 
40
- EVALUATION_QUEUE_TEXT = """
41
- ## Some good practices before submitting a model
42
-
43
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
44
- ```python
45
- from transformers import AutoConfig, AutoModel, AutoTokenizer
46
- config = AutoConfig.from_pretrained("your model name", revision=revision)
47
- model = AutoModel.from_pretrained("your model name", revision=revision)
48
- tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
49
- ```
50
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
51
-
52
- Note: make sure your model is public!
53
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
54
-
55
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
56
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
57
-
58
- ### 3) Make sure your model has an open license!
59
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
60
-
61
- ### 4) Fill up your model card
62
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
63
-
64
- ## In case of model failure
65
- If your model is displayed in the `FAILED` category, its execution stopped.
66
- Make sure you have followed the above steps first.
67
- If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
68
- """
69
-
70
- CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
71
- CITATION_BUTTON_TEXT = r"""
72
- """
 
1
  from dataclasses import dataclass
2
  from enum import Enum
3
 
4
+
5
  @dataclass
6
  class Task:
7
  benchmark: str
 
12
  # Select your tasks here
13
  # ---------------------------------------------------
14
  class Tasks(Enum):
15
+ # task_key in the json file, metric_key in the json file, name to display in the leaderboard
16
  task0 = Task("anli_r1", "acc", "ANLI")
17
  task1 = Task("logiqa", "acc_norm", "LogiQA")
18
 
 
 
19
 
20
+ # ---------------------------------------------------
21
 
22
 
23
  # Your leaderboard name
24
+ TITLE = """<h1 align="center" id="space-title">U-MATH / μ-MATH leaderboard</h1>"""
25
 
26
  # What does your leaderboard evaluate?
27
  INTRODUCTION_TEXT = """
 
29
  """
30
 
31
  # Which evaluations are you running? how can people reproduce what you have?
32
+ LLM_BENCHMARKS_TEXT = """
33
  ## How it works
34
 
35
  ## Reproducibility
 
37
 
38
  """
39
 
40
+ CITATION_TEXT = r"""@misc{chernyshev2024umath,
41
+ title={U-MATH: A University-Level Benchmark for Evaluating Mathematical Skills in LLMs},
42
+ author={Konstantin Chernyshev and Vitaliy Polshkov and Ekaterina Artemova and Alex Myasnikov and Vlad Stepanov and Alexei Miasnikov and Sergei Tilga},
43
+ year={2024},
44
+ eprint={2412.03205},
45
+ archivePrefix={arXiv},
46
+ primaryClass={cs.CL},
47
+ url={https://arxiv.org/abs/2412.03205},
48
+ }"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/display/css_html_js.py DELETED
@@ -1,105 +0,0 @@
1
- custom_css = """
2
-
3
- .markdown-text {
4
- font-size: 16px !important;
5
- }
6
-
7
- #models-to-add-text {
8
- font-size: 18px !important;
9
- }
10
-
11
- #citation-button span {
12
- font-size: 16px !important;
13
- }
14
-
15
- #citation-button textarea {
16
- font-size: 16px !important;
17
- }
18
-
19
- #citation-button > label > button {
20
- margin: 6px;
21
- transform: scale(1.3);
22
- }
23
-
24
- #leaderboard-table {
25
- margin-top: 15px
26
- }
27
-
28
- #leaderboard-table-lite {
29
- margin-top: 15px
30
- }
31
-
32
- #search-bar-table-box > div:first-child {
33
- background: none;
34
- border: none;
35
- }
36
-
37
- #search-bar {
38
- padding: 0px;
39
- }
40
-
41
- /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
42
- #leaderboard-table td:nth-child(2),
43
- #leaderboard-table th:nth-child(2) {
44
- max-width: 400px;
45
- overflow: auto;
46
- white-space: nowrap;
47
- }
48
-
49
- .tab-buttons button {
50
- font-size: 20px;
51
- }
52
-
53
- #scale-logo {
54
- border-style: none !important;
55
- box-shadow: none;
56
- display: block;
57
- margin-left: auto;
58
- margin-right: auto;
59
- max-width: 600px;
60
- }
61
-
62
- #scale-logo .download {
63
- display: none;
64
- }
65
- #filter_type{
66
- border: 0;
67
- padding-left: 0;
68
- padding-top: 0;
69
- }
70
- #filter_type label {
71
- display: flex;
72
- }
73
- #filter_type label > span{
74
- margin-top: var(--spacing-lg);
75
- margin-right: 0.5em;
76
- }
77
- #filter_type label > .wrap{
78
- width: 103px;
79
- }
80
- #filter_type label > .wrap .wrap-inner{
81
- padding: 2px;
82
- }
83
- #filter_type label > .wrap .wrap-inner input{
84
- width: 1px
85
- }
86
- #filter-columns-type{
87
- border:0;
88
- padding:0.5;
89
- }
90
- #filter-columns-size{
91
- border:0;
92
- padding:0.5;
93
- }
94
- #box-filter > .form{
95
- border: 0
96
- }
97
- """
98
-
99
- get_window_url_params = """
100
- function(url_params) {
101
- const params = new URLSearchParams(window.location.search);
102
- url_params = Object.fromEntries(params);
103
- return url_params;
104
- }
105
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/display/formatting.py DELETED
@@ -1,27 +0,0 @@
1
- def model_hyperlink(link, model_name):
2
- return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
3
-
4
-
5
- def make_clickable_model(model_name):
6
- link = f"https://huggingface.co/{model_name}"
7
- return model_hyperlink(link, model_name)
8
-
9
-
10
- def styled_error(error):
11
- return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
12
-
13
-
14
- def styled_warning(warn):
15
- return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
16
-
17
-
18
- def styled_message(message):
19
- return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
20
-
21
-
22
- def has_no_nan_values(df, columns):
23
- return df[columns].notna().all(axis=1)
24
-
25
-
26
- def has_nan_values(df, columns):
27
- return df[columns].isna().any(axis=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/display/utils.py DELETED
@@ -1,110 +0,0 @@
1
- from dataclasses import dataclass, make_dataclass
2
- from enum import Enum
3
-
4
- import pandas as pd
5
-
6
- from src.about import Tasks
7
-
8
- def fields(raw_class):
9
- return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
10
-
11
-
12
- # These classes are for user facing column names,
13
- # to avoid having to change them all around the code
14
- # when a modif is needed
15
- @dataclass
16
- class ColumnContent:
17
- name: str
18
- type: str
19
- displayed_by_default: bool
20
- hidden: bool = False
21
- never_hidden: bool = False
22
-
23
- ## Leaderboard columns
24
- auto_eval_column_dict = []
25
- # Init
26
- auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
27
- auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
28
- #Scores
29
- auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
30
- for task in Tasks:
31
- auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
32
- # Model information
33
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
34
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
35
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
36
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
37
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
38
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
39
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
40
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
41
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
42
-
43
- # We use make dataclass to dynamically fill the scores from Tasks
44
- AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
45
-
46
- ## For the queue columns in the submission tab
47
- @dataclass(frozen=True)
48
- class EvalQueueColumn: # Queue column
49
- model = ColumnContent("model", "markdown", True)
50
- revision = ColumnContent("revision", "str", True)
51
- private = ColumnContent("private", "bool", True)
52
- precision = ColumnContent("precision", "str", True)
53
- weight_type = ColumnContent("weight_type", "str", "Original")
54
- status = ColumnContent("status", "str", True)
55
-
56
- ## All the model information that we might need
57
- @dataclass
58
- class ModelDetails:
59
- name: str
60
- display_name: str = ""
61
- symbol: str = "" # emoji
62
-
63
-
64
- class ModelType(Enum):
65
- PT = ModelDetails(name="pretrained", symbol="🟢")
66
- FT = ModelDetails(name="fine-tuned", symbol="🔶")
67
- IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
68
- RL = ModelDetails(name="RL-tuned", symbol="🟦")
69
- Unknown = ModelDetails(name="", symbol="?")
70
-
71
- def to_str(self, separator=" "):
72
- return f"{self.value.symbol}{separator}{self.value.name}"
73
-
74
- @staticmethod
75
- def from_str(type):
76
- if "fine-tuned" in type or "🔶" in type:
77
- return ModelType.FT
78
- if "pretrained" in type or "🟢" in type:
79
- return ModelType.PT
80
- if "RL-tuned" in type or "🟦" in type:
81
- return ModelType.RL
82
- if "instruction-tuned" in type or "⭕" in type:
83
- return ModelType.IFT
84
- return ModelType.Unknown
85
-
86
- class WeightType(Enum):
87
- Adapter = ModelDetails("Adapter")
88
- Original = ModelDetails("Original")
89
- Delta = ModelDetails("Delta")
90
-
91
- class Precision(Enum):
92
- float16 = ModelDetails("float16")
93
- bfloat16 = ModelDetails("bfloat16")
94
- Unknown = ModelDetails("?")
95
-
96
- def from_str(precision):
97
- if precision in ["torch.float16", "float16"]:
98
- return Precision.float16
99
- if precision in ["torch.bfloat16", "bfloat16"]:
100
- return Precision.bfloat16
101
- return Precision.Unknown
102
-
103
- # Column selection
104
- COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
105
-
106
- EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
107
- EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
108
-
109
- BENCHMARK_COLS = [t.value.col_name for t in Tasks]
110
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/envs.py DELETED
@@ -1,25 +0,0 @@
1
- import os
2
-
3
- from huggingface_hub import HfApi
4
-
5
- # Info to change for your repository
6
- # ----------------------------------
7
- TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
8
-
9
- OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
10
- # ----------------------------------
11
-
12
- REPO_ID = f"{OWNER}/leaderboard"
13
- QUEUE_REPO = f"{OWNER}/requests"
14
- RESULTS_REPO = f"{OWNER}/results"
15
-
16
- # If you setup a cache later, just change HF_HOME
17
- CACHE_PATH=os.getenv("HF_HOME", ".")
18
-
19
- # Local caches
20
- EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
21
- EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
22
- EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
23
- EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
24
-
25
- API = HfApi(token=TOKEN)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/leaderboard/read_evals.py DELETED
@@ -1,196 +0,0 @@
1
- import glob
2
- import json
3
- import math
4
- import os
5
- from dataclasses import dataclass
6
-
7
- import dateutil
8
- import numpy as np
9
-
10
- from src.display.formatting import make_clickable_model
11
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
12
- from src.submission.check_validity import is_model_on_hub
13
-
14
-
15
- @dataclass
16
- class EvalResult:
17
- """Represents one full evaluation. Built from a combination of the result and request file for a given run.
18
- """
19
- eval_name: str # org_model_precision (uid)
20
- full_model: str # org/model (path on hub)
21
- org: str
22
- model: str
23
- revision: str # commit hash, "" if main
24
- results: dict
25
- precision: Precision = Precision.Unknown
26
- model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
27
- weight_type: WeightType = WeightType.Original # Original or Adapter
28
- architecture: str = "Unknown"
29
- license: str = "?"
30
- likes: int = 0
31
- num_params: int = 0
32
- date: str = "" # submission date of request file
33
- still_on_hub: bool = False
34
-
35
- @classmethod
36
- def init_from_json_file(self, json_filepath):
37
- """Inits the result from the specific model result file"""
38
- with open(json_filepath) as fp:
39
- data = json.load(fp)
40
-
41
- config = data.get("config")
42
-
43
- # Precision
44
- precision = Precision.from_str(config.get("model_dtype"))
45
-
46
- # Get model and org
47
- org_and_model = config.get("model_name", config.get("model_args", None))
48
- org_and_model = org_and_model.split("/", 1)
49
-
50
- if len(org_and_model) == 1:
51
- org = None
52
- model = org_and_model[0]
53
- result_key = f"{model}_{precision.value.name}"
54
- else:
55
- org = org_and_model[0]
56
- model = org_and_model[1]
57
- result_key = f"{org}_{model}_{precision.value.name}"
58
- full_model = "/".join(org_and_model)
59
-
60
- still_on_hub, _, model_config = is_model_on_hub(
61
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
62
- )
63
- architecture = "?"
64
- if model_config is not None:
65
- architectures = getattr(model_config, "architectures", None)
66
- if architectures:
67
- architecture = ";".join(architectures)
68
-
69
- # Extract results available in this file (some results are split in several files)
70
- results = {}
71
- for task in Tasks:
72
- task = task.value
73
-
74
- # We average all scores of a given metric (not all metrics are present in all files)
75
- accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
76
- if accs.size == 0 or any([acc is None for acc in accs]):
77
- continue
78
-
79
- mean_acc = np.mean(accs) * 100.0
80
- results[task.benchmark] = mean_acc
81
-
82
- return self(
83
- eval_name=result_key,
84
- full_model=full_model,
85
- org=org,
86
- model=model,
87
- results=results,
88
- precision=precision,
89
- revision= config.get("model_sha", ""),
90
- still_on_hub=still_on_hub,
91
- architecture=architecture
92
- )
93
-
94
- def update_with_request_file(self, requests_path):
95
- """Finds the relevant request file for the current model and updates info with it"""
96
- request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
97
-
98
- try:
99
- with open(request_file, "r") as f:
100
- request = json.load(f)
101
- self.model_type = ModelType.from_str(request.get("model_type", ""))
102
- self.weight_type = WeightType[request.get("weight_type", "Original")]
103
- self.license = request.get("license", "?")
104
- self.likes = request.get("likes", 0)
105
- self.num_params = request.get("params", 0)
106
- self.date = request.get("submitted_time", "")
107
- except Exception:
108
- print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
109
-
110
- def to_dict(self):
111
- """Converts the Eval Result to a dict compatible with our dataframe display"""
112
- average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
113
- data_dict = {
114
- "eval_name": self.eval_name, # not a column, just a save name,
115
- AutoEvalColumn.precision.name: self.precision.value.name,
116
- AutoEvalColumn.model_type.name: self.model_type.value.name,
117
- AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
118
- AutoEvalColumn.weight_type.name: self.weight_type.value.name,
119
- AutoEvalColumn.architecture.name: self.architecture,
120
- AutoEvalColumn.model.name: make_clickable_model(self.full_model),
121
- AutoEvalColumn.revision.name: self.revision,
122
- AutoEvalColumn.average.name: average,
123
- AutoEvalColumn.license.name: self.license,
124
- AutoEvalColumn.likes.name: self.likes,
125
- AutoEvalColumn.params.name: self.num_params,
126
- AutoEvalColumn.still_on_hub.name: self.still_on_hub,
127
- }
128
-
129
- for task in Tasks:
130
- data_dict[task.value.col_name] = self.results[task.value.benchmark]
131
-
132
- return data_dict
133
-
134
-
135
- def get_request_file_for_model(requests_path, model_name, precision):
136
- """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
137
- request_files = os.path.join(
138
- requests_path,
139
- f"{model_name}_eval_request_*.json",
140
- )
141
- request_files = glob.glob(request_files)
142
-
143
- # Select correct request file (precision)
144
- request_file = ""
145
- request_files = sorted(request_files, reverse=True)
146
- for tmp_request_file in request_files:
147
- with open(tmp_request_file, "r") as f:
148
- req_content = json.load(f)
149
- if (
150
- req_content["status"] in ["FINISHED"]
151
- and req_content["precision"] == precision.split(".")[-1]
152
- ):
153
- request_file = tmp_request_file
154
- return request_file
155
-
156
-
157
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
158
- """From the path of the results folder root, extract all needed info for results"""
159
- model_result_filepaths = []
160
-
161
- for root, _, files in os.walk(results_path):
162
- # We should only have json files in model results
163
- if len(files) == 0 or any([not f.endswith(".json") for f in files]):
164
- continue
165
-
166
- # Sort the files by date
167
- try:
168
- files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
169
- except dateutil.parser._parser.ParserError:
170
- files = [files[-1]]
171
-
172
- for file in files:
173
- model_result_filepaths.append(os.path.join(root, file))
174
-
175
- eval_results = {}
176
- for model_result_filepath in model_result_filepaths:
177
- # Creation of result
178
- eval_result = EvalResult.init_from_json_file(model_result_filepath)
179
- eval_result.update_with_request_file(requests_path)
180
-
181
- # Store results of same eval together
182
- eval_name = eval_result.eval_name
183
- if eval_name in eval_results.keys():
184
- eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
185
- else:
186
- eval_results[eval_name] = eval_result
187
-
188
- results = []
189
- for v in eval_results.values():
190
- try:
191
- v.to_dict() # we test if the dict version is complete
192
- results.append(v)
193
- except KeyError: # not all eval values present
194
- continue
195
-
196
- return results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/populate.py CHANGED
@@ -1,58 +1,286 @@
1
  import json
2
  import os
 
3
 
4
  import pandas as pd
 
 
5
 
6
- from src.display.formatting import has_no_nan_values, make_clickable_model
7
- from src.display.utils import AutoEvalColumn, EvalQueueColumn
8
- from src.leaderboard.read_evals import get_raw_eval_results
9
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
12
- """Creates a dataframe from all the individual experiment results"""
13
- raw_data = get_raw_eval_results(results_path, requests_path)
14
- all_data_json = [v.to_dict() for v in raw_data]
15
 
16
- df = pd.DataFrame.from_records(all_data_json)
17
- df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
18
- df = df[cols].round(decimals=2)
 
 
 
 
 
 
 
 
 
19
 
20
- # filter out if any of the benchmarks have not been produced
21
- df = df[has_no_nan_values(df, benchmark_cols)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  return df
23
 
24
 
25
- def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
26
- """Creates the different dataframes for the evaluation queues requestes"""
27
- entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
28
- all_evals = []
29
-
30
- for entry in entries:
31
- if ".json" in entry:
32
- file_path = os.path.join(save_path, entry)
33
- with open(file_path) as fp:
34
- data = json.load(fp)
35
-
36
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
37
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
38
-
39
- all_evals.append(data)
40
- elif ".md" not in entry:
41
- # this is a folder
42
- sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
43
- for sub_entry in sub_entries:
44
- file_path = os.path.join(save_path, entry, sub_entry)
45
- with open(file_path) as fp:
46
- data = json.load(fp)
47
-
48
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
49
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
50
- all_evals.append(data)
51
-
52
- pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
53
- running_list = [e for e in all_evals if e["status"] == "RUNNING"]
54
- finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
55
- df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
56
- df_running = pd.DataFrame.from_records(running_list, columns=cols)
57
- df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
58
- return df_finished[cols], df_running[cols], df_pending[cols]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import json
2
  import os
3
+ from dataclasses import dataclass, field
4
 
5
  import pandas as pd
6
+ from huggingface_hub import model_info
7
+ from transformers import AutoConfig
8
 
 
 
 
9
 
10
+ def is_model_on_hub(
11
+ model_name: str, revision: str, token: str = None, trust_remote_code=False
12
+ ) -> tuple[bool, str | None, str | None]:
13
+ """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
14
+ try:
15
+ config = AutoConfig.from_pretrained(
16
+ model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
17
+ )
18
+ return True, None, config
19
+ except Exception:
20
+ return False, "was not found on hub!", None
21
 
 
 
 
 
22
 
23
+ def model_size_to_symbol(model_size_in_b_params: int | None) -> str:
24
+ """Converts model size to a symbol"""
25
+ if model_size_in_b_params is None or model_size_in_b_params == 0 or not model_size_in_b_params:
26
+ return "❓"
27
+ if model_size_in_b_params < 5:
28
+ return "🛴"
29
+ elif model_size_in_b_params < 50:
30
+ return "🚗"
31
+ elif model_size_in_b_params < 100:
32
+ return "🚚"
33
+ else:
34
+ return "🚀"
35
 
36
+
37
+ def model_type_to_symbol(model_type: str) -> str:
38
+ """Converts model type to a symbol"""
39
+ if model_type == "Open-Weights":
40
+ return "💙"
41
+ elif model_type == "Proprietary":
42
+ return "🟥"
43
+ else:
44
+ return "❓"
45
+
46
+
47
+ def get_hf_data_by_model_name(model_name: str) -> dict:
48
+ """Get model data from Hugging Face API by model name"""
49
+ still_on_hub, _, model_config = is_model_on_hub(model_name, "main", trust_remote_code=True)
50
+
51
+ architecture = "Unknown"
52
+ if model_config is not None:
53
+ architectures = getattr(model_config, "architectures", None)
54
+ if architectures:
55
+ architecture = ";".join(architectures)
56
+
57
+ num_params = None
58
+ if still_on_hub:
59
+ info = model_info(repo_id=model_name)
60
+ try:
61
+ num_params = round(info.safetensors["total"] / 1e9, 3)
62
+ except AttributeError as e:
63
+ print("SafeTensors not found in", model_name, e)
64
+ pass
65
+ print("num_params", model_name, num_params)
66
+
67
+ model_url = None
68
+ if still_on_hub:
69
+ model_url = f"https://huggingface.co/{model_name}"
70
+
71
+ model_license = "Unknown"
72
+ if model_config is not None:
73
+ info = model_info(repo_id=model_name)
74
+ # print(info.card_data)
75
+ model_license = info.card_data["license_name"]
76
+ model_license_link = info.card_data["license_link"]
77
+ if model_license_link:
78
+ model_license = f"[{model_license}]({model_license_link})"
79
+ if not model_license:
80
+ model_license = "Unknown"
81
+
82
+ return {
83
+ "model_architecture": architecture,
84
+ "model_type": "Open-Weights" if still_on_hub else "Proprietary",
85
+ "model_size": num_params if num_params else None,
86
+ "model_url": model_url,
87
+ "model_license": model_license,
88
+ }
89
+
90
+
91
+ @dataclass
92
+ class Field:
93
+ pretty_name: str
94
+ column_type: str
95
+ displayed_by_default: bool = True
96
+ never_hidden: bool = False
97
+ fully_hidden: bool = False
98
+ tags: list[str] = field(default_factory=list)
99
+
100
+
101
+ MODEL_COLUMNS_DICT = {
102
+ "model_type_symbol": Field("T", "str", never_hidden=True),
103
+ "model_size_symbol": Field("S", "str", never_hidden=True),
104
+ "model_name": Field("Model Name", "markdown", never_hidden=True),
105
+ "model_type": Field("Type", "str", displayed_by_default=False),
106
+ "model_size": Field("#Params (B)", "number", displayed_by_default=False),
107
+ "model_architecture": Field("Architecture", "str", displayed_by_default=False),
108
+ "model_license": Field("License", "markdown", displayed_by_default=False),
109
+ }
110
+
111
+ U_MATH_COLUMNS_DICT = {
112
+ "rank": Field("Rank", "number", never_hidden=True),
113
+ **MODEL_COLUMNS_DICT,
114
+ "judge_model_name": Field("Judge Model Name", "markdown", displayed_by_default=False),
115
+ "u_math_acc": Field("U-MATH Acc", "number", never_hidden=True, tags=["u_math"]),
116
+ "u_math_text_acc": Field("U-MATH Text Acc", "number", tags=["u_math", "text"]),
117
+ "u_math_visual_acc": Field("U-MATH Visual Acc", "number", tags=["u_math", "visual"]),
118
+ "differential_calc_acc": Field("Diff Calc Acc", "number", displayed_by_default=False, tags=["subjects"]),
119
+ "differential_calc_text_acc": Field("Diff Calc Text Acc", "number", displayed_by_default=False, tags=["text"]),
120
+ "differential_calc_visual_acc": Field(
121
+ "Diff Calc Visual Acc", "number", displayed_by_default=False, tags=["visual"]
122
+ ),
123
+ "integral_calc_acc": Field("Integral Calc Acc", "number", displayed_by_default=False, tags=["subjects"]),
124
+ "integral_calc_text_acc": Field("Integral Calc Text Acc", "number", displayed_by_default=False, tags=["text"]),
125
+ "integral_calc_visual_acc": Field(
126
+ "Integral Calc Visual Acc", "number", displayed_by_default=False, tags=["visual"]
127
+ ),
128
+ "algebra_acc": Field("Algebra Acc", "number", displayed_by_default=False, tags=["subjects"]),
129
+ "algebra_text_acc": Field("Algebra Text Acc", "number", displayed_by_default=False, tags=["text"]),
130
+ "algebra_visual_acc": Field("Algebra Visual Acc", "number", displayed_by_default=False, tags=["visual"]),
131
+ "multivariable_calculus_acc": Field("Multivar Calc Acc", "number", displayed_by_default=False, tags=["subjects"]),
132
+ "multivariable_calculus_text_acc": Field(
133
+ "Multivar Calc Text Acc", "number", displayed_by_default=False, tags=["text"]
134
+ ),
135
+ "multivariable_calculus_visual_acc": Field(
136
+ "Multivar Calc Visual Acc", "number", displayed_by_default=False, tags=["visual"]
137
+ ),
138
+ "precalculus_review_acc": Field("Precalc Acc", "number", displayed_by_default=False, tags=["subjects"]),
139
+ "precalculus_review_text_acc": Field("Precalc Text Acc", "number", displayed_by_default=False, tags=["text"]),
140
+ "precalculus_review_visual_acc": Field(
141
+ "Precalc Visual Acc", "number", displayed_by_default=False, tags=["visual"]
142
+ ),
143
+ "sequences_series_acc": Field("Seq & Series Acc", "number", displayed_by_default=False, tags=["subjects"]),
144
+ "sequences_series_text_acc": Field("Seq & Series Text Acc", "number", displayed_by_default=False, tags=["text"]),
145
+ "sequences_series_visual_acc": Field(
146
+ "Seq & Series Visual Acc", "number", displayed_by_default=False, tags=["visual"]
147
+ ),
148
+ }
149
+
150
+ MU_MATH_COLUMNS_DICT = {
151
+ "rank": Field("Rank", "number", never_hidden=True),
152
+ **MODEL_COLUMNS_DICT,
153
+ "extract_model_name": Field("Extract Model Name", "markdown", displayed_by_default=False),
154
+ "mu_math_f1": Field("μ-MATH F1", "number", never_hidden=True, tags=["mu_math", "splits"]),
155
+ "mu_math_tpr": Field("μ-MATH TPR", "number", displayed_by_default=False, tags=["mu_math"]),
156
+ "mu_math_tnr": Field("μ-MATH TNR", "number", displayed_by_default=False, tags=["mu_math"]),
157
+ "mu_math_ppv": Field("μ-MATH PPV", "number", displayed_by_default=False, tags=["mu_math"]),
158
+ "mu_math_npv": Field("μ-MATH NPV", "number", displayed_by_default=False, tags=["mu_math"]),
159
+ "GPT-4o_f1": Field("GPT-4o Subset F1", "number", tags=["splits"]),
160
+ "GPT-4o_tpr": Field("GPT-4o Subset TPR", "number", displayed_by_default=False),
161
+ "GPT-4o_tnr": Field("GPT-4o Subset TNR", "number", displayed_by_default=False),
162
+ "GPT-4o_ppv": Field("GPT-4o Subset PPV", "number", displayed_by_default=False),
163
+ "GPT-4o_npv": Field("GPT-4o Subset NPV", "number", displayed_by_default=False),
164
+ "Gemini-1.5-Pro_f1": Field("Gemini-1.5-Pro Subset F1", "number", tags=["splits"]),
165
+ "Gemini-1.5-Pro_tpr": Field("Gemini-1.5-Pro Subset TPR", "number", displayed_by_default=False),
166
+ "Gemini-1.5-Pro_tnr": Field("Gemini-1.5-Pro Subset TNR", "number", displayed_by_default=False),
167
+ "Gemini-1.5-Pro_ppv": Field("Gemini-1.5-Pro Subset PPV", "number", displayed_by_default=False),
168
+ "Gemini-1.5-Pro_npv": Field("Gemini-1.5-Pro Subset NPV", "number", displayed_by_default=False),
169
+ "Llama-3.1-70B-Instruct_f1": Field("Llama-3.1-70B Subset F1", "number", tags=["splits"]),
170
+ "Llama-3.1-70B-Instruct_tpr": Field("Llama-3.1-70B Subset TPR", "number", displayed_by_default=False),
171
+ "Llama-3.1-70B-Instruct_tnr": Field("Llama-3.1-70B Subset TNR", "number", displayed_by_default=False),
172
+ "Llama-3.1-70B-Instruct_ppv": Field("Llama-3.1-70B Subset PPV", "number", displayed_by_default=False),
173
+ "Llama-3.1-70B-Instruct_npv": Field("Llama-3.1-70B Subset NPV", "number", displayed_by_default=False),
174
+ "Qwen2.5-72B-Instruct_f1": Field("Qwen2.5-72B Subset F1", "number", tags=["splits"]),
175
+ "Qwen2.5-72B-Instruct_tpr": Field("Qwen2.5-72B Subset TPR", "number", displayed_by_default=False),
176
+ "Qwen2.5-72B-Instruct_tnr": Field("Qwen2.5-72B Subset TNR", "number", displayed_by_default=False),
177
+ "Qwen2.5-72B-Instruct_ppv": Field("Qwen2.5-72B Subset PPV", "number", displayed_by_default=False),
178
+ "Qwen2.5-72B-Instruct_npv": Field("Qwen2.5-72B Subset NPV", "number", displayed_by_default=False),
179
+ }
180
+
181
+
182
+ def load_json_data(json_path: str, main_col: str | None = None) -> pd.DataFrame:
183
+ """Loads json data from a file"""
184
+ with open(json_path, "r") as f:
185
+ data = json.load(f)
186
+ df = pd.DataFrame.from_records(data)
187
+ if main_col:
188
+ df = df.sort_values(by=[main_col], ascending=False)
189
+
190
+ for col in df.columns:
191
+ if df.dtypes[col] == "float64":
192
+ df[col] = df[col].round(decimals=2)
193
  return df
194
 
195
 
196
+ def get_u_math_leaderboard_df() -> pd.DataFrame:
197
+ """Creates a dataframe from json with U-MATH eval results"""
198
+ json_path = os.path.join("data", "u_math_eval_results.json")
199
+ df = load_json_data(json_path)
200
+
201
+ # flatten list [x, y, z] in columns as ["_acc", "_text_acc", "_visual_acc"] suffixes for columns
202
+ for col in [
203
+ "u_math",
204
+ "differential_calc",
205
+ "integral_calc",
206
+ "algebra",
207
+ "multivariable_calculus",
208
+ "precalculus_review",
209
+ "sequences_series",
210
+ ]:
211
+ df[col + "_acc"] = df[col].apply(lambda x: x[0])
212
+ df[col + "_text_acc"] = df[col].apply(lambda x: x[1])
213
+ df[col + "_visual_acc"] = df[col].apply(lambda x: x[2])
214
+ del df[col]
215
+
216
+ # Sort and add rank
217
+ df = df.sort_values(by=["u_math_acc"], ascending=False)
218
+ df["rank"] = range(1, len(df) + 1)
219
+
220
+ # populate with model info
221
+ model_to_meta_dict = {
222
+ model_name: get_hf_data_by_model_name(model_name) for model_name in df["model_name"].unique()
223
+ }
224
+ df["model_architecture"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_architecture"])
225
+ df["model_license"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_license"])
226
+ df["model_type"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_type"])
227
+ df["model_type_symbol"] = df["model_type"].apply(model_type_to_symbol)
228
+ df["model_size"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_size"])
229
+ df["model_size_symbol"] = df["model_size"].apply(model_size_to_symbol)
230
+ df["model_name"] = df["model_name"].apply(
231
+ lambda x: f"[{x}]({url})" if (url := model_to_meta_dict[x]["model_url"]) else x
232
+ )
233
+
234
+ # convert to pretty names and sort columns by order in dict
235
+ df = df[U_MATH_COLUMNS_DICT.keys()]
236
+ df = df.rename(columns={key: col.pretty_name for key, col in U_MATH_COLUMNS_DICT.items() if key in df.columns})
237
+
238
+ return df
239
+
240
+
241
+ def get_mu_math_leaderboard_df() -> pd.DataFrame:
242
+ """Creates a dataframe from json with mu-MATH eval results"""
243
+ json_path = os.path.join("data", "mu_math_eval_results.json")
244
+ df = load_json_data(json_path)
245
+
246
+ # Calculate columns with prefixes f1, tpr, tnr, ppv, npv
247
+ for col in ["mu_math", "GPT-4o", "Gemini-1.5-Pro", "Llama-3.1-70B-Instruct", "Qwen2.5-72B-Instruct"]:
248
+ df[col + "_f1"] = df[col].apply(lambda x: x[0])
249
+ df[col + "_tpr"] = df[col].apply(lambda x: x[1])
250
+ df[col + "_tnr"] = df[col].apply(lambda x: x[2])
251
+ df[col + "_ppv"] = df[col].apply(lambda x: x[3])
252
+ df[col + "_npv"] = df[col].apply(lambda x: x[4])
253
+ del df[col]
254
+
255
+ # # flatten list [x, y, z] in columns as ["_f1", "_precision", "_recall"] suffixes for columns
256
+ # for col in ["mu_math", "GPT-4o", "Gemini-1.5-Pro", "Llama-3.1-70B-Instruct", "Qwen2.5-72B-Instruct"]:
257
+ # df[col + "_f1"] = df[col].apply(lambda x: x[0])
258
+ # df[col + "_precision"] = df[col].apply(lambda x: x[1])
259
+ # df[col + "_recall"] = df[col].apply(lambda x: x[2])
260
+ # del df[col]
261
+
262
+ NUM_MU_MATH_SAMPLES = 1084
263
+
264
+ # Sort and add rank
265
+ df = df.sort_values(by=["mu_math_f1"], ascending=False)
266
+ df["rank"] = range(1, len(df) + 1)
267
+
268
+ # populate with model info
269
+ model_to_meta_dict = {
270
+ model_name: get_hf_data_by_model_name(model_name) for model_name in df["model_name"].unique()
271
+ }
272
+ df["model_architecture"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_architecture"])
273
+ df["model_license"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_license"])
274
+ df["model_type"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_type"])
275
+ df["model_type_symbol"] = df["model_type"].apply(model_type_to_symbol)
276
+ df["model_size"] = df["model_name"].apply(lambda x: model_to_meta_dict[x]["model_size"])
277
+ df["model_size_symbol"] = df["model_size"].apply(model_size_to_symbol)
278
+ df["model_name"] = df["model_name"].apply(
279
+ lambda x: f"[{x}]({url})" if (url := model_to_meta_dict[x]["model_url"]) else x
280
+ )
281
+
282
+ # convert to pretty names and sort columns by order in dict
283
+ df = df[MU_MATH_COLUMNS_DICT.keys()]
284
+ df = df.rename(columns={key: col.pretty_name for key, col in MU_MATH_COLUMNS_DICT.items() if key in df.columns})
285
+
286
+ return df
src/submission/check_validity.py DELETED
@@ -1,99 +0,0 @@
1
- import json
2
- import os
3
- import re
4
- from collections import defaultdict
5
- from datetime import datetime, timedelta, timezone
6
-
7
- import huggingface_hub
8
- from huggingface_hub import ModelCard
9
- from huggingface_hub.hf_api import ModelInfo
10
- from transformers import AutoConfig
11
- from transformers.models.auto.tokenization_auto import AutoTokenizer
12
-
13
- def check_model_card(repo_id: str) -> tuple[bool, str]:
14
- """Checks if the model card and license exist and have been filled"""
15
- try:
16
- card = ModelCard.load(repo_id)
17
- except huggingface_hub.utils.EntryNotFoundError:
18
- return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
19
-
20
- # Enforce license metadata
21
- if card.data.license is None:
22
- if not ("license_name" in card.data and "license_link" in card.data):
23
- return False, (
24
- "License not found. Please add a license to your model card using the `license` metadata or a"
25
- " `license_name`/`license_link` pair."
26
- )
27
-
28
- # Enforce card content
29
- if len(card.text) < 200:
30
- return False, "Please add a description to your model card, it is too short."
31
-
32
- return True, ""
33
-
34
- def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
35
- """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
36
- try:
37
- config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
38
- if test_tokenizer:
39
- try:
40
- tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
41
- except ValueError as e:
42
- return (
43
- False,
44
- f"uses a tokenizer which is not in a transformers release: {e}",
45
- None
46
- )
47
- except Exception as e:
48
- return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
49
- return True, None, config
50
-
51
- except ValueError:
52
- return (
53
- False,
54
- "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
55
- None
56
- )
57
-
58
- except Exception as e:
59
- return False, "was not found on hub!", None
60
-
61
-
62
- def get_model_size(model_info: ModelInfo, precision: str):
63
- """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
64
- try:
65
- model_size = round(model_info.safetensors["total"] / 1e9, 3)
66
- except (AttributeError, TypeError):
67
- return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
68
-
69
- size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
70
- model_size = size_factor * model_size
71
- return model_size
72
-
73
- def get_model_arch(model_info: ModelInfo):
74
- """Gets the model architecture from the configuration"""
75
- return model_info.config.get("architectures", "Unknown")
76
-
77
- def already_submitted_models(requested_models_dir: str) -> set[str]:
78
- """Gather a list of already submitted models to avoid duplicates"""
79
- depth = 1
80
- file_names = []
81
- users_to_submission_dates = defaultdict(list)
82
-
83
- for root, _, files in os.walk(requested_models_dir):
84
- current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
85
- if current_depth == depth:
86
- for file in files:
87
- if not file.endswith(".json"):
88
- continue
89
- with open(os.path.join(root, file), "r") as f:
90
- info = json.load(f)
91
- file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
92
-
93
- # Select organisation
94
- if info["model"].count("/") == 0 or "submitted_time" not in info:
95
- continue
96
- organisation, _ = info["model"].split("/")
97
- users_to_submission_dates[organisation].append(info["submitted_time"])
98
-
99
- return set(file_names), users_to_submission_dates
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/submission/submit.py DELETED
@@ -1,119 +0,0 @@
1
- import json
2
- import os
3
- from datetime import datetime, timezone
4
-
5
- from src.display.formatting import styled_error, styled_message, styled_warning
6
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
7
- from src.submission.check_validity import (
8
- already_submitted_models,
9
- check_model_card,
10
- get_model_size,
11
- is_model_on_hub,
12
- )
13
-
14
- REQUESTED_MODELS = None
15
- USERS_TO_SUBMISSION_DATES = None
16
-
17
def add_new_eval(
    model: str,
    base_model: str,
    revision: str,
    precision: str,
    weight_type: str,
    model_type: str,
):
    """Validate a model submission and add it to the evaluation queue.

    Performs a series of checks (model type selected, model and base model
    exist on the hub, model info retrievable, license and model card present,
    no duplicate submission), then writes an eval-request JSON file locally
    and uploads it to the queue dataset repo.

    Args:
        model: Hub repo id of the model to evaluate (e.g. "org/model").
        base_model: Base model repo id (required for Delta/Adapter weights).
        revision: Git revision to evaluate; empty string defaults to "main".
        precision: Precision label (only the first token before a space is kept).
        weight_type: One of "Original", "Delta", "Adapter".
        model_type: User-selected model type; must be non-empty.

    Returns:
        An HTML-styled status string (error, warning, or success message).
    """
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    # Lazily load the already-requested models on the first submission.
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    user_name = ""
    model_path = model
    if "/" in model:
        user_name = model.split("/")[0]
        model_path = model.split("/")[1]

    precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if model_type is None or model_type == "":
        return styled_error("Please select a model type.")

    # Does the model actually exist? Default to the main branch.
    if revision == "":
        revision = "main"

    # Delta/adapter weights additionally require the base model on the hub.
    if weight_type in ["Delta", "Adapter"]:
        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not base_model_on_hub:
            return styled_error(f'Base model "{base_model}" {error}')

    # Idiom fix: `!=` instead of `not ... == ...`.
    if weight_type != "Adapter":
        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not model_on_hub:
            return styled_error(f'Model "{model}" {error}')

    # Is the model info correctly filled?
    try:
        model_info = API.model_info(repo_id=model, revision=revision)
    except Exception:
        return styled_error("Could not get your model information. Please fill it up properly.")

    model_size = get_model_size(model_info=model_info, precision=precision)

    # Were the model card and license filled?
    try:
        # Renamed from `license` to avoid shadowing the builtin.
        model_license = model_info.cardData["license"]
    except Exception:
        return styled_error("Please select a license for your model")

    modelcard_OK, error_msg = check_model_card(model)
    if not modelcard_OK:
        return styled_error(error_msg)

    # Seems good, creating the eval
    print("Adding new eval")

    eval_entry = {
        "model": model,
        "base_model": base_model,
        "revision": revision,
        "precision": precision,
        "weight_type": weight_type,
        "status": "PENDING",
        "submitted_time": current_time,
        "model_type": model_type,
        "likes": model_info.likes,
        "params": model_size,
        "license": model_license,
        "private": False,
    }

    # Check for duplicate submission
    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
        return styled_warning("This model has been already submitted.")

    print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"

    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))

    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        # NOTE(review): assumes EVAL_REQUESTS_PATH contains "eval-queue/" —
        # the split raises IndexError otherwise; confirm against src/envs.py.
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )

    # Remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )