gardarjuto committed
Commit 117d89c · 1 Parent(s): 9b8b426

remove submit tab

app.py CHANGED
@@ -1,4 +1,3 @@
-import subprocess
 import gradio as gr
 import pandas as pd
 import pandas.io.formats.style as style
@@ -6,7 +5,6 @@ from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
 
 from src.about import (
-    EVALUATION_QUEUE_TEXT,
     INTRODUCTION_TEXT,
     LLM_BENCHMARKS_TEXT,
     TITLE,
@@ -15,20 +13,15 @@ from src.display.css_html_js import custom_css
 from src.display.utils import (
     BENCHMARK_COLS,
     COLS,
-    EVAL_COLS,
-    EVAL_TYPES,
     NUMERIC_INTERVALS,
     TYPES,
     AutoEvalColumn,
     ModelType,
-    ModelAPI,
     fields,
-    WeightType,
     Precision
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
-from src.populate import get_evaluation_queue_df, get_leaderboard_df
-from src.submission.submit import add_new_eval
+from src.populate import get_leaderboard_df
 
 
 def restart_space():
@@ -53,13 +46,6 @@ except Exception:
 raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 leaderboard_df = original_df.copy()
 
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
-
-
 # Searching and filtering
 def update_table(
     hidden_df: pd.DataFrame,
@@ -251,101 +237,6 @@ with demo:
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_api = gr.Dropdown(
-                        choices=[a.value.name for a in ModelAPI],
-                        label="Model API",
-                        multiselect=False,
-                        value="hf",
-                        interactive=True,
-                    )
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_api,
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
 
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", hours=1)
src/about.py CHANGED
@@ -20,7 +20,6 @@ class Tasks(Enum):
     task5 = Task("icelandic_belebele", "exact_match,get-answer", "Belebele (IS)")
     task6 = Task("icelandic_arc_challenge", "exact_match,get-answer", "ARC-Challenge-IS")
 
-NUM_FEWSHOT = 0 # Change with your few shot
 # ---------------------------------------------------
 
 
@@ -70,33 +69,3 @@ A machine-translated version of the ARC-Challenge multiple-choice question-answe
 
 """
 
-EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
-"""
-
src/display/utils.py CHANGED
@@ -32,7 +32,6 @@ for task in Tasks:
 # Model information
 auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
 auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
 auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
 auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
 auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
@@ -50,7 +49,6 @@ class EvalQueueColumn: # Queue column
     revision = ColumnContent("revision", "str", True)
     private = ColumnContent("private", "bool", True)
     precision = ColumnContent("precision", "str", True)
-    weight_type = ColumnContent("weight_type", "str", "Original")
     status = ColumnContent("status", "str", True)
 
 ## All the model information that we might need
@@ -89,18 +87,10 @@ class ModelType(Enum):
             return ModelType.IFT
         return ModelType.Unknown
 
-class WeightType(Enum):
-    Adapter = ModelDetails("Adapter")
-    Original = ModelDetails("Original")
-    Delta = ModelDetails("Delta")
-
 class Precision(Enum):
     float16 = ModelDetails("float16")
     bfloat16 = ModelDetails("bfloat16")
     float32 = ModelDetails("float32")
-    #qt_8bit = ModelDetails("8bit")
-    #qt_4bit = ModelDetails("4bit")
-    #qt_GPTQ = ModelDetails("GPTQ")
     Unknown = ModelDetails("?")
 
     def from_str(precision):
@@ -110,12 +100,6 @@ class Precision(Enum):
             return Precision.bfloat16
         if precision in ["float32"]:
             return Precision.float32
-        #if precision in ["8bit"]:
-        #    return Precision.qt_8bit
-        #if precision in ["4bit"]:
-        #    return Precision.qt_4bit
-        #if precision in ["GPTQ", "None"]:
-        #    return Precision.qt_GPTQ
         return Precision.Unknown
 
 # Column selection
src/leaderboard/read_evals.py CHANGED
@@ -8,7 +8,7 @@ import dateutil
 import numpy as np
 
 from src.display.formatting import make_clickable_model
-from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
+from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision
 from src.submission.check_validity import is_model_on_hub
 
 
@@ -24,7 +24,6 @@ class EvalResult:
     results: dict
     precision: Precision = Precision.Unknown
     model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
-    weight_type: WeightType = WeightType.Original # Original or Adapter
     architecture: str = "Unknown"
     license: str = "?"
     likes: int = 0
@@ -99,7 +98,6 @@ class EvalResult:
             with open(request_file, "r") as f:
                 request = json.load(f)
             self.model_type = ModelType.from_str(request.get("model_type", ""))
-            self.weight_type = WeightType[request.get("weight_type", "Original")]
             self.license = request.get("license", "?")
             self.likes = request.get("likes", 0)
             self.num_params = request.get("params", 0)
@@ -115,7 +113,6 @@ class EvalResult:
             AutoEvalColumn.precision.name: self.precision.value.name,
             AutoEvalColumn.model_type.name: self.model_type.value.name,
             AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             AutoEvalColumn.architecture.name: self.architecture,
             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
             AutoEvalColumn.revision.name: self.revision,
src/populate.py CHANGED
@@ -1,10 +1,7 @@
-import json
-import os
-
 import pandas as pd
 
-from src.display.formatting import has_no_nan_values, make_clickable_model
-from src.display.utils import AutoEvalColumn, EvalQueueColumn
+from src.display.formatting import has_no_nan_values
+from src.display.utils import AutoEvalColumn
 from src.leaderboard.read_evals import get_raw_eval_results
 
 
@@ -20,39 +17,3 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     # filter out if any of the benchmarks have not been produced
     df = df[has_no_nan_values(df, benchmark_cols)]
     return raw_data, df
-
-
-def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
-    """Creates the different dataframes for the evaluation queues requestes"""
-    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
-    all_evals = []
-
-    for entry in entries:
-        if ".json" in entry:
-            file_path = os.path.join(save_path, entry)
-            with open(file_path) as fp:
-                data = json.load(fp)
-
-            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-
-            all_evals.append(data)
-        elif ".md" not in entry:
-            # this is a folder
-            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
-            for sub_entry in sub_entries:
-                file_path = os.path.join(save_path, entry, sub_entry)
-                with open(file_path) as fp:
-                    data = json.load(fp)
-
-                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-                all_evals.append(data)
-
-    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
-    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
-    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
-    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
-    df_running = pd.DataFrame.from_records(running_list, columns=cols)
-    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-    return df_finished[cols], df_running[cols], df_pending[cols]
src/submission/check_validity.py CHANGED
@@ -1,99 +1,36 @@
-import json
-import os
-import re
-from collections import defaultdict
-from datetime import datetime, timedelta, timezone
-
-import huggingface_hub
-from huggingface_hub import ModelCard
-from huggingface_hub.hf_api import ModelInfo
 from transformers import AutoConfig
 from transformers.models.auto.tokenization_auto import AutoTokenizer
 
-def check_model_card(repo_id: str) -> tuple[bool, str]:
-    """Checks if the model card and license exist and have been filled"""
-    try:
-        card = ModelCard.load(repo_id)
-    except huggingface_hub.utils.EntryNotFoundError:
-        return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
-
-    # Enforce license metadata
-    if card.data.license is None:
-        if not ("license_name" in card.data and "license_link" in card.data):
-            return False, (
-                "License not found. Please add a license to your model card using the `license` metadata or a"
-                " `license_name`/`license_link` pair."
-            )
-
-    # Enforce card content
-    if len(card.text) < 200:
-        return False, "Please add a description to your model card, it is too short."
-
-    return True, ""
 
-def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
+def is_model_on_hub(
+    model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False
+) -> tuple[bool, str]:
     """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
     try:
-        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+        config = AutoConfig.from_pretrained(
+            model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
+        )
         if test_tokenizer:
             try:
-                tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+                tk = AutoTokenizer.from_pretrained(
+                    model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
+                )
             except ValueError as e:
+                return (False, f"uses a tokenizer which is not in a transformers release: {e}", None)
+            except Exception as e:
                 return (
                     False,
-                    f"uses a tokenizer which is not in a transformers release: {e}",
-                    None
+                    "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?",
+                    None,
                 )
-            except Exception as e:
-                return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
         return True, None, config
 
     except ValueError:
         return (
             False,
             "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
-            None
+            None,
         )
 
     except Exception as e:
         return False, "was not found on hub!", None
-
-
-def get_model_size(model_info: ModelInfo, precision: str):
-    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
-    try:
-        model_size = round(model_info.safetensors["total"] / 1e9, 3)
-    except (AttributeError, TypeError):
-        return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-
-    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
-    model_size = size_factor * model_size
-    return model_size
-
-def get_model_arch(model_info: ModelInfo):
-    """Gets the model architecture from the configuration"""
-    return model_info.config.get("architectures", "Unknown")
-
-def already_submitted_models(requested_models_dir: str) -> set[str]:
-    """Gather a list of already submitted models to avoid duplicates"""
-    depth = 1
-    file_names = []
-    users_to_submission_dates = defaultdict(list)
-
-    for root, _, files in os.walk(requested_models_dir):
-        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
-        if current_depth == depth:
-            for file in files:
-                if not file.endswith(".json"):
-                    continue
-                with open(os.path.join(root, file), "r") as f:
-                    info = json.load(f)
-                file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
-
-                # Select organisation
-                if info["model"].count("/") == 0 or "submitted_time" not in info:
-                    continue
-                organisation, _ = info["model"].split("/")
-                users_to_submission_dates[organisation].append(info["submitted_time"])
-
-    return set(file_names), users_to_submission_dates
src/submission/submit.py DELETED
@@ -1,128 +0,0 @@
-import json
-import os
-from datetime import datetime, timezone
-
-from src.display.formatting import styled_error, styled_message, styled_warning
-from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
-from src.submission.check_validity import (
-    already_submitted_models,
-    check_model_card,
-    get_model_size,
-    is_model_on_hub,
-)
-
-REQUESTED_MODELS = None
-USERS_TO_SUBMISSION_DATES = None
-
-def add_new_eval(
-    model_api: str,
-    model: str,
-    base_model: str,
-    revision: str,
-    precision: str,
-    weight_type: str,
-    model_type: str,
-):
-    global REQUESTED_MODELS
-    global USERS_TO_SUBMISSION_DATES
-    if not REQUESTED_MODELS:
-        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-
-    user_name = ""
-    model_path = model
-    if "/" in model:
-        user_name = model.split("/")[0]
-        model_path = model.split("/")[1]
-
-    precision = precision.split(" ")[0]
-    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-
-    if model_api not in ["hf", "openai-chat-completions", "anthropic-chat-completions"]:
-        return styled_error('Please select a model API from one of "hf", "openai-chat-completions" or "anthropic-chat-completions"')
-
-    if model_type is None or model_type == "":
-        return styled_error("Please select a model type.")
-
-    if model_api in ["openai-chat-completions", "anthropic-chat-completions"]:
-        # Don't need to check for model details for these APIs
-        print("Adding new eval for OpenAI/Anthropic model")
-    else:
-        # Does the model actually exist?
-        if revision == "":
-            revision = "main"
-
-        # Is the model on the hub?
-        if weight_type in ["Delta", "Adapter"]:
-            base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-            if not base_model_on_hub:
-                return styled_error(f'Base model "{base_model}" {error}')
-
-        if not weight_type == "Adapter":
-            model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-            if not model_on_hub:
-                return styled_error(f'Model "{model}" {error}')
-
-        # Is the model info correctly filled?
-        try:
-            model_info = API.model_info(repo_id=model, revision=revision)
-        except Exception:
-            return styled_error("Could not get your model information. Please fill it up properly.")
-
-        model_size = get_model_size(model_info=model_info, precision=precision)
-
-        # Were the model card and license filled?
-        try:
-            license = model_info.cardData["license"]
-        except Exception:
-            return styled_error("Please select a license for your model")
-
-        modelcard_OK, error_msg = check_model_card(model)
-        if not modelcard_OK:
-            return styled_error(error_msg)
-
-    # Seems good, creating the eval
-    print("Adding new eval for HF model")
-
-    eval_entry = {
-        "model_api": model_api,
-        "model": model,
-        "base_model": base_model,
-        "revision": revision,
-        "precision": precision if model_api == "hf" else None,
-        "weight_type": weight_type,
-        "status": "PENDING",
-        "submitted_time": current_time,
-        "model_type": model_type,
-        "likes": model_info.likes if model_api == "hf" else None,
-        "params": model_size if model_api == "hf" else None,
-        "license": license if model_api == "hf" else None,
-        "private": False if model_api == "hf" else True,
-    }
-
-    # Check for duplicate submission
-    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
-        return styled_warning("This model has been already submitted.")
-
-    print("Creating eval file")
-    OUT_DIR = os.path.join(EVAL_REQUESTS_PATH, user_name)
-    os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = os.path.join(OUT_DIR, f"{model_path}_eval_request_False_{precision}_{weight_type}.json")
-
-    with open(out_path, "w") as f:
-        f.write(json.dumps(eval_entry))
-
-    print("Uploading eval file")
-    API.upload_file(
-        path_or_fileobj=out_path,
-        path_in_repo=out_path.split("eval-queue/")[1],
-        repo_id=QUEUE_REPO,
-        repo_type="dataset",
-        commit_message=f"Add {model} to eval queue",
-    )
-
-    # Remove the local file
-    os.remove(out_path)
-
-    return styled_message(
-        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
-    )