mariagrandury committed on
Commit
d11223d
·
1 Parent(s): 37a8577

Add models to eval queue

Browse files
Files changed (25) hide show
  1. .gitignore +0 -1
  2. AIDC-AI/Marco-LLM-ES_eval_request_False_bfloat16_Original.json +1 -1
  3. HuggingFaceTB/SmolLM2-1.7B-Instruct_eval_request_False_bfloat16_Original.json +1 -0
  4. HuggingFaceTB/SmolLM2-1.7B_eval_request_False_bfloat16_Original.json +1 -0
  5. Iker/Llama-3-Instruct-Neurona-8b-v2_eval_request_False_bfloat16_Original.json +1 -1
  6. Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8_eval_request_False_float16_Original.json +1 -0
  7. Qwen/Qwen2.5-32B-Instruct-GPTQ-Int4_eval_request_False_float16_Original.json +1 -0
  8. Qwen/Qwen2.5-3B-Instruct_eval_request_False_bfloat16_Original.json +1 -0
  9. Qwen/Qwen2.5-3B_eval_request_False_bfloat16_Original.json +1 -0
  10. Qwen/Qwen3-1.7B_eval_request_False_bfloat16_Original.json +1 -0
  11. Qwen/Qwen3-14B-AWQ_eval_request_False_float16_Original.json +1 -0
  12. TheBloke/Llama-2-13B-chat-GPTQ_eval_request_False_float16_Original.json +1 -0
  13. google/gemma-3-1b-it_eval_request_False_bfloat16_Original.json +1 -0
  14. ibm-granite/granite-3.0-8b-base_eval_request_False_float32_Original.json +1 -1
  15. ibm-granite/granite-3.0-8b-instruct_eval_request_False_float32_Original.json +1 -1
  16. meta-llama/Llama-3.2-3B-Instruct_eval_request_False_bfloat16_Original.json +1 -0
  17. meta-llama/Llama-3.2-3B_eval_request_False_bfloat16_Original.json +1 -0
  18. microsoft/Phi-3.5-mini-instruct_eval_request_False_float32_Original.json +1 -1
  19. openGPT-X/Teuken-7B-instruct-commercial-v0.4_eval_request_False_bfloat16_Original.json +1 -0
  20. openGPT-X/Teuken-7B-instruct-research-v0.4_eval_request_False_bfloat16_Original.json +1 -0
  21. orai-nlp/Llama-eus-8B_eval_request_False_bfloat16_Original.json +1 -1
  22. sandbox-ai/Llama-3.1-Tango-8b-f16_eval_request_False_float16_Original.json +1 -1
  23. scripts/generate.py +20 -21
  24. scripts/models.csv +120 -29
  25. tiiuae/Falcon3-7B-Base_eval_request_False_bfloat16_Original.json +1 -1
.gitignore CHANGED
@@ -1 +0,0 @@
1
- scripts/
 
 
AIDC-AI/Marco-LLM-ES_eval_request_False_bfloat16_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "AIDC-AI/Marco-LLM-ES", "base_model": "", "revision": "2e36ce00a312b5187ddb06df8db29a59acf60245", "precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2025-01-14T12:41:07Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": -1, "license": "apache-2.0", "private": false, "sender": "ChenyangLyu"}
 
1
+ {"model": "AIDC-AI/Marco-LLM-ES", "base_model": "", "revision": "2e36ce00a312b5187ddb06df8db29a59acf60245", "precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": -1, "license": "apache-2.0", "private": false, "sender": "ChenyangLyu"}
HuggingFaceTB/SmolLM2-1.7B-Instruct_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "HuggingFaceTB/SmolLM2-1.7B-Instruct", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-21T20:10:27Z", "model_type": "instruction-tuned", "likes": 0, "params": 1.71, "license": "custom", "architecture": "", "sender": "mariagrandury"}
HuggingFaceTB/SmolLM2-1.7B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "HuggingFaceTB/SmolLM2-1.7B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 1.71, "license": "custom", "architecture": "", "sender": "mariagrandury"}
Iker/Llama-3-Instruct-Neurona-8b-v2_eval_request_False_bfloat16_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "Iker/Llama-3-Instruct-Neurona-8b-v2", "base_model": "", "revision": "92b369f60dae263d14f496e7215ee89c80473660", "precision": "bfloat16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2024-09-26T08:53:03Z", "model_type": "\u2b55 : instruction-tuned", "likes": 2, "params": 8.03, "license": "llama3", "private": false, "sender": "Iker"}
 
1
+ {"model": "Iker/Llama-3-Instruct-Neurona-8b-v2", "base_model": "", "revision": "92b369f60dae263d14f496e7215ee89c80473660", "precision": "bfloat16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-09-26T08:53:03Z", "model_type": "\u2b55 : instruction-tuned", "likes": 2, "params": 8.03, "license": "llama3", "private": false, "sender": "Iker"}
Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-21T20:10:27Z", "model_type": "instruction-tuned", "likes": 0, "params": 4.99, "license": "custom", "architecture": "", "sender": "mariagrandury"}
Qwen/Qwen2.5-32B-Instruct-GPTQ-Int4_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "Qwen/Qwen2.5-32B-Instruct-GPTQ-Int4", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T18:13:32Z", "model_type": "instruction-tuned", "likes": 0, "params": 5.74, "license": "custom", "architecture": "", "sender": "mariagrandury"}
Qwen/Qwen2.5-3B-Instruct_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "Qwen/Qwen2.5-3B-Instruct", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "instruction-tuned", "likes": 0, "params": 3.09, "license": "custom", "architecture": "", "sender": "mariagrandury"}
Qwen/Qwen2.5-3B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "Qwen/Qwen2.5-3B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 3.09, "license": "custom", "architecture": "", "sender": "mariagrandury"}
Qwen/Qwen3-1.7B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "Qwen/Qwen3-1.7B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-21T20:10:27Z", "model_type": "instruction-tuned", "likes": 0, "params": 2.03, "license": "custom", "architecture": "", "sender": "mariagrandury"}
Qwen/Qwen3-14B-AWQ_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "Qwen/Qwen3-14B-AWQ", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-21T20:10:27Z", "model_type": "instruction-tuned", "likes": 0, "params": 3.32, "license": "custom", "architecture": "", "sender": "mariagrandury"}
TheBloke/Llama-2-13B-chat-GPTQ_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "TheBloke/Llama-2-13B-chat-GPTQ", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T18:13:32Z", "model_type": "instruction-tuned", "likes": 0, "params": 2.03, "license": "custom", "architecture": "", "sender": "mariagrandury"}
google/gemma-3-1b-it_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "google/gemma-3-1b-it", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-21T20:10:27Z", "model_type": "instruction-tuned", "likes": 0, "params": 1.0, "license": "custom", "architecture": "", "sender": "mariagrandury"}
ibm-granite/granite-3.0-8b-base_eval_request_False_float32_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "ibm-granite/granite-3.0-8b-base", "base_model": "", "revision": "23357b69523bd98523496a5aba1f48bdea04a137", "precision": "float32", "architecture": "GraniteForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2024-12-03T11:36:41Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 21, "params": 8.171, "license": "apache-2.0", "private": false, "sender": "asier-gutierrez"}
 
1
+ {"model": "ibm-granite/granite-3.0-8b-base", "base_model": "", "revision": "23357b69523bd98523496a5aba1f48bdea04a137", "precision": "float32", "architecture": "GraniteForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-12-03T11:36:41Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 21, "params": 8.171, "license": "apache-2.0", "private": false, "sender": "asier-gutierrez"}
ibm-granite/granite-3.0-8b-instruct_eval_request_False_float32_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "ibm-granite/granite-3.0-8b-instruct", "base_model": "", "revision": "8fe1e202a17f7763bd0af471253e00cc846d1c05", "precision": "float32", "architecture": "GraniteForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2024-12-03T11:36:17Z", "model_type": "\u2b55 : instruction-tuned", "likes": 180, "params": 8.171, "license": "apache-2.0", "private": false, "sender": "asier-gutierrez"}
 
1
+ {"model": "ibm-granite/granite-3.0-8b-instruct", "base_model": "", "revision": "8fe1e202a17f7763bd0af471253e00cc846d1c05", "precision": "float32", "architecture": "GraniteForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-12-03T11:36:17Z", "model_type": "\u2b55 : instruction-tuned", "likes": 180, "params": 8.171, "license": "apache-2.0", "private": false, "sender": "asier-gutierrez"}
meta-llama/Llama-3.2-3B-Instruct_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "meta-llama/Llama-3.2-3B-Instruct", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "instruction-tuned", "likes": 0, "params": 3.21, "license": "custom", "architecture": "", "sender": "mariagrandury"}
meta-llama/Llama-3.2-3B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "meta-llama/Llama-3.2-3B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 3.21, "license": "custom", "architecture": "", "sender": "mariagrandury"}
microsoft/Phi-3.5-mini-instruct_eval_request_False_float32_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "microsoft/Phi-3.5-mini-instruct", "base_model": "", "revision": "af0dfb8029e8a74545d0736d30cb6b58d2f0f3f0", "precision": "float32", "architecture": "Phi3ForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2024-12-03T11:35:21Z", "model_type": "\u2b55 : instruction-tuned", "likes": 664, "params": 3.821, "license": "mit", "private": false, "sender": "asier-gutierrez"}
 
1
+ {"model": "microsoft/Phi-3.5-mini-instruct", "base_model": "", "revision": "af0dfb8029e8a74545d0736d30cb6b58d2f0f3f0", "precision": "float32", "architecture": "Phi3ForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-12-03T11:35:21Z", "model_type": "\u2b55 : instruction-tuned", "likes": 664, "params": 3.821, "license": "mit", "private": false, "sender": "asier-gutierrez"}
openGPT-X/Teuken-7B-instruct-commercial-v0.4_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "openGPT-X/Teuken-7B-instruct-commercial-v0.4", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "instruction-tuned", "likes": 0, "params": 7.45, "license": "custom", "architecture": "", "sender": "mariagrandury"}
openGPT-X/Teuken-7B-instruct-research-v0.4_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"model": "openGPT-X/Teuken-7B-instruct-research-v0.4", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-06-21T13:46:00Z", "model_type": "instruction-tuned", "likes": 0, "params": 7.45, "license": "custom", "architecture": "", "sender": "mariagrandury"}
orai-nlp/Llama-eus-8B_eval_request_False_bfloat16_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "orai-nlp/Llama-eus-8B", "base_model": "", "revision": "75b5645d222047b517a7a9190922ea1b5382c71f", "precision": "bfloat16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2024-10-01T08:20:05Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 3, "params": 8.03, "license": null, "private": false, "sender": "andercorral"}
 
1
+ {"model": "orai-nlp/Llama-eus-8B", "base_model": "", "revision": "75b5645d222047b517a7a9190922ea1b5382c71f", "precision": "bfloat16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-10-01T08:20:05Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 3, "params": 8.03, "license": null, "private": false, "sender": "andercorral"}
sandbox-ai/Llama-3.1-Tango-8b-f16_eval_request_False_float16_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "sandbox-ai/Llama-3.1-Tango-8b-f16", "base_model": "", "revision": "6be7482100037da375ba586234c59c5ccaad7ec1", "precision": "float16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2024-12-08T04:05:41Z", "model_type": "\ud83d\udd36 : fine-tuned", "likes": 0, "params": 8.03, "license": "llama3.1", "private": false, "sender": "tatakof"}
 
1
+ {"model": "sandbox-ai/Llama-3.1-Tango-8b-f16", "base_model": "", "revision": "6be7482100037da375ba586234c59c5ccaad7ec1", "precision": "float16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-12-08T04:05:41Z", "model_type": "\ud83d\udd36 : fine-tuned", "likes": 0, "params": 8.03, "license": "llama3.1", "private": false, "sender": "tatakof"}
scripts/generate.py CHANGED
@@ -5,7 +5,10 @@ from datetime import datetime
5
  import pandas as pd
6
 
7
 
8
- def generate_request(model_id, precision, model_type, params, index):
 
 
 
9
  data = {
10
  "model": model_id,
11
  "base_model": "",
@@ -13,11 +16,9 @@ def generate_request(model_id, precision, model_type, params, index):
13
  "private": False,
14
  "precision": precision,
15
  "weight_type": "Original",
16
- "status": "FINISHED",
17
- "submitted_time": (datetime.now() + pd.Timedelta(hours=index)).strftime(
18
- "%Y-%m-%dT%H:%M:%SZ"
19
- ),
20
- "model_type": f"\ud83d\udfe2 : {model_type} if model_type == 'pretrained' else model_type",
21
  "likes": 0,
22
  "params": params,
23
  "license": "custom",
@@ -32,24 +33,22 @@ def generate_request(model_id, precision, model_type, params, index):
32
 
33
  def generate_requests(selection: str):
34
  df = pd.read_csv("scripts/models.csv")
35
- df = df[["model_id", "precision", "model_type", "params", "iberobench"]]
36
 
37
  if selection == "pretrained":
38
  df = df[df["model_type"] == "pretrained"]
39
- elif selection == "pretrained_new":
40
- df = df[df["model_type"] == "pretrained"]
41
- df = df[df["iberobench"] == False]
42
- elif selection == "instruction":
43
  df = df[df["model_type"] == "instruction-tuned"]
 
 
44
 
45
- for index, row in df.iterrows():
46
- model_id, precision, model_type, params, iberobench = row
47
  generate_request(
48
  model_id=model_id,
49
  precision=precision,
50
  model_type=model_type,
51
  params=params,
52
- index=index,
53
  )
54
 
55
 
@@ -58,15 +57,15 @@ if __name__ == "__main__":
58
 
59
  parser = argparse.ArgumentParser(description="Generate model requests.")
60
  parser.add_argument("--pretrained", action="store_true")
61
- parser.add_argument("--pretrained_new", action="store_true")
62
- parser.add_argument("--instruction", action="store_true")
63
  args = parser.parse_args()
64
 
65
  if args.pretrained:
66
  generate_requests("pretrained")
67
- elif args.pretrained_new:
68
- generate_requests("pretrained_new")
69
- elif args.instruction:
70
- generate_requests("instruction")
71
  else:
72
- generate_requests()
 
5
  import pandas as pd
6
 
7
 
8
+ def generate_request(model_id, precision, model_type, params):
9
+ model_type = (
10
+ f"\ud83d\udfe2 : {model_type}" if model_type == "pretrained" else model_type
11
+ )
12
  data = {
13
  "model": model_id,
14
  "base_model": "",
 
16
  "private": False,
17
  "precision": precision,
18
  "weight_type": "Original",
19
+ "status": "PENDING",
20
+ "submitted_time": (datetime.now()).strftime("%Y-%m-%dT%H:%M:%SZ"),
21
+ "model_type": model_type,
 
 
22
  "likes": 0,
23
  "params": params,
24
  "license": "custom",
 
33
 
34
  def generate_requests(selection: str):
35
  df = pd.read_csv("scripts/models.csv")
36
+ df = df[["status", "model_id", "precision", "model_type", "params"]]
37
 
38
  if selection == "pretrained":
39
  df = df[df["model_type"] == "pretrained"]
40
+ elif selection == "instructed":
 
 
 
41
  df = df[df["model_type"] == "instruction-tuned"]
42
+ elif selection == "todo":
43
+ df = df[df["status"] == "Not started"]
44
 
45
+ for _, row in df.iterrows():
46
+ status, model_id, precision, model_type, params = row
47
  generate_request(
48
  model_id=model_id,
49
  precision=precision,
50
  model_type=model_type,
51
  params=params,
 
52
  )
53
 
54
 
 
57
 
58
  parser = argparse.ArgumentParser(description="Generate model requests.")
59
  parser.add_argument("--pretrained", action="store_true")
60
+ parser.add_argument("--instructed", action="store_true")
61
+ parser.add_argument("--todo", action="store_true")
62
  args = parser.parse_args()
63
 
64
  if args.pretrained:
65
  generate_requests("pretrained")
66
+ elif args.instructed:
67
+ generate_requests("instructed")
68
+ elif args.todo:
69
+ generate_requests("todo")
70
  else:
71
+ print("Please select a valid option between: pretrained, instructed, todo, all")
scripts/models.csv CHANGED
@@ -1,29 +1,120 @@
1
- status,model_id,model_url,iberobench,model_type,params,precision,gated,remote,avg,logs,error
2
- Not started,meta-llama/Meta-Llama-3.1-8B-Instruct,https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct,No,instruction-tuned,8.03,bfloat16,True,,,,
3
- Not started,mistralai/Mistral-7B-Instruct-v0.3,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3,No,instruction-tuned,7.25,bfloat16,True,,,,
4
- Not started,google/gemma-2-2b-it,https://huggingface.co/google/gemma-2-2b-it,No,instruction-tuned,2.61,bfloat16,True,,,,"Model ""google/gemma-2-2b-it"" was not found on hub! 'gemma2’"
5
- Not started,google/gemma-2-9b-it,https://huggingface.co/google/gemma-2-9b-it,No,instruction-tuned,9.24,bfloat16,True,,,,
6
- In progress,google/gemma-2-2b,https://huggingface.co/google/gemma-2-2b,Yes,pretrained,2.61,float32,True,,,,
7
- In progress,google/gemma-2-9b,https://huggingface.co/google/gemma-2-9b,Yes,pretrained,9.24,float32,True,,,,
8
- Not started,microsoft/Phi-3.5-mini-instruct,https://huggingface.co/microsoft/Phi-3.5-mini-instruct,No,instruction-tuned,3.82,bfloat16,False,True,,,"Model ""microsoft/Phi-3.5-mini-instruct"" needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard."
9
- Not started,microsoft/Phi-3-small-128k-instruct,https://huggingface.co/microsoft/Phi-3-small-128k-instruct,No,instruction-tuned,7.39,bfloat16,False,True,,,
10
- Not started,tiiuae/falcon-7b-instruct,https://huggingface.co/tiiuae/falcon-7b-instruct,No,instruction-tuned,7,bfloat16,False,False,,,
11
- Not started,01-ai/Yi-1.5-9B-Chat,https://huggingface.co/01-ai/Yi-1.5-9B-Chat,No,instruction-tuned,8.83,bfloat16,False,False,,,
12
- Not started,internlm/internlm2_5-7b-chat,https://huggingface.co/internlm/internlm2_5-7b-chat,No,instruction-tuned,7.74,bfloat16,False,True,,,
13
- In progress,HiTZ/latxa-7b-v1.2,https://huggingface.co/HiTZ/latxa-7b-v1.2,Yes,pretrained,7,bfloat16,False,,,,
14
- In progress,proxectonos/Carballo-bloom-1.3B,https://huggingface.co/proxectonos/Carballo-bloom-1.3B,Yes,pretrained,1.31,float16,False,,,,"Model ""proxectonos/Carballo-bloom-1.3B"" 's tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured? unhashable type: 'dict'"
15
- Not started,projecte-aina/aguila-7b,https://huggingface.co/projecte-aina/aguila-7b,No,pretrained,6.85,float16,False,True,,,
16
- In progress,projecte-aina/FLOR-6.3B,https://huggingface.co/projecte-aina/FLOR-6.3B,Yes,pretrained,6.25,float16,True,,,,
17
- Not started,projecte-aina/FLOR-6.3B-Instructed,https://huggingface.co/projecte-aina/FLOR-6.3B-Instructed,No,instruction-tuned,6.25,float16,True,,,,
18
- Not started,gplsi/Aitana-6.3B,https://huggingface.co/gplsi/Aitana-6.3B,No,pretrained,6.25,bfloat16,False,,,,Eval error: 'ScalarNode' object is not callable
19
- In progress,occiglot/occiglot-7b-es-en,https://huggingface.co/occiglot/occiglot-7b-es-en,Yes,pretrained,7.24,float32,False,,,,
20
- Not started,occiglot/occiglot-7b-es-en-instruct,https://huggingface.co/occiglot/occiglot-7b-es-en-instruct,No,instruction-tuned,7.24,float32,False,,,,
21
- Not started,LenguajeNaturalAI/leniachat-gemma-2b-v0,https://huggingface.co/LenguajeNaturalAI/leniachat-gemma-2b-v0,No,instruction-tuned,2.51,bfloat16,False,,,,"Model ""LenguajeNaturalAI/leniachat-gemma-2b-v0"" was not found on hub! 'gemma'"
22
- Not started,LenguajeNaturalAI/leniachat-qwen2-1.5B-v0,https://huggingface.co/LenguajeNaturalAI/leniachat-qwen2-1.5B-v0,No,instruction-tuned,1.54,bfloat16,False,,,,"Model ""LenguajeNaturalAI/leniachat-qwen2-1.5B-v0"" was not found on hub! 'qwen2'"
23
- Not started,bertin-project/Gromenauer-7B-Instruct,https://huggingface.co/bertin-project/Gromenauer-7B-Instruct,No,instruction-tuned,7.24,float32,False,,,,
24
- Not started,bertin-project/bertin-gpt-j-6B,https://huggingface.co/bertin-project/bertin-gpt-j-6B,No,pretrained,6,float32,False,,,,
25
- In progress,meta-llama/Meta-Llama-3.1-8B,https://huggingface.co/meta-llama/Meta-Llama-3.1-8B,Yes,pretrained,8.03,bfloat16,True,,,,
26
- In progress,mistralai/Mistral-7B-v0.3,https://huggingface.co/mistralai/Mistral-7B-v0.3,Yes,pretrained,7.25,bfloat16,True,,,,
27
- Not started,01-ai/Yi-1.5-9B,https://huggingface.co/01-ai/Yi-1.5-9B,No,pretrained,8.83,bfloat16,False,,,,
28
- Not started,microsoft/phi-1_5,https://huggingface.co/microsoft/phi-1_5,No,pretrained,1.42,float16,False,,,,
29
- Not started,tiiuae/falcon-7b,https://huggingface.co/tiiuae/falcon-7b,No,pretrained,7,bfloat16,False,,,,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ status,model_id,precision,model_type,date_download,params
2
+ Done,BSC-LT/salamandra-2b,bfloat16,pretrained,"December 2, 2024",2.25
3
+ Done,BSC-LT/salamandra-2b-instruct,bfloat16,instruction-tuned,"January 27, 2025",2.25
4
+ Done,BSC-LT/salamandra-7b,bfloat16,pretrained,"December 2, 2024",7.77
5
+ Done,BSC-LT/salamandra-7b-instruct,bfloat16,instruction-tuned,"January 27, 2025",7.77
6
+ Done,meta-llama/Llama-3.2-1B,bfloat16,pretrained,,1.24
7
+ Done,meta-llama/Llama-3.2-1B-Instruct,bfloat16,instruction-tuned,,1.24
8
+ Done,meta-llama/Meta-Llama-3.1-8B,bfloat16,pretrained,"September 13, 2024",8.03
9
+ Done,meta-llama/Meta-Llama-3.1-8B-Instruct,bfloat16,instruction-tuned,"September 13, 2024",8.03
10
+ Done,bertin-project/Gromenauer-7B,float32,pretrained,"December 11, 2024",7.24
11
+ Done,bertin-project/Gromenauer-7B-Instruct,float32,instruction-tuned,"December 11, 2024",7.24
12
+ Done,HiTZ/latxa-7b-v1.2,bfloat16,pretrained,"July 25, 2024",7
13
+ Done,mistralai/Mistral-7B-Instruct-v0.3,bfloat16,instruction-tuned,"August 26, 2024",7.25
14
+ Done,mistralai/Mistral-7B-v0.3,bfloat16,pretrained,"June 27, 2024",7.25
15
+ Done,occiglot/occiglot-7b-es-en,float32,pretrained,"June 5, 2024",7.24
16
+ Done,occiglot/occiglot-7b-es-en-instruct,float32,instruction-tuned,"November 29, 2024",7.24
17
+ Done,projecte-aina/FLOR-6.3B,float16,pretrained,"June 3, 2024",6.25
18
+ Done,projecte-aina/FLOR-6.3B-Instructed,float16,instruction-tuned,"October 28, 2024",6.25
19
+ Done,gplsi/Aitana-6.3B,bfloat16,pretrained,"June 5, 2024",6.25
20
+ Not started,Nos-PT/Llama-Carvalho-GL,float16,pretrained,,8.03
21
+ Not started,Nos-PT/Llama-Carvalho-PT-GL,float16,pretrained,,8.03
22
+ Done,proxectonos/Carballo-bloom-1.3B,float16,pretrained,"December 11, 2024",1.31
23
+ Not started,proxectonos/Llama-3.1-Carballo,float16,pretrained,,8.03
24
+ Done,IIC/RigoChat-7b-v2,bfloat16,instruction-tuned,"February 5, 2025",7.62
25
+ Done,LenguajeNaturalAI/leniachat-gemma-2b-v0,bfloat16,instruction-tuned,"December 11, 2024",2.51
26
+ Done,LenguajeNaturalAI/leniachat-qwen2-1.5B-v0,bfloat16,instruction-tuned,"December 11, 2024",1.54
27
+ Done,utter-project/EuroLLM-1.7B,bfloat16,pretrained,,1.7
28
+ Done,utter-project/EuroLLM-1.7B-Instruct,bfloat16,instruction-tuned,"September 23, 2024",1.7
29
+ Done,utter-project/EuroLLM-9B,bfloat16,pretrained,"February 10, 2025",9.15
30
+ Done,utter-project/EuroLLM-9B-Instruct,bfloat16,instruction-tuned,"February 10, 2025",9.15
31
+ Not started,HuggingFaceTB/SmolLM2-1.7B,bfloat16,pretrained,,1.71
32
+ Test,HuggingFaceTB/SmolLM2-1.7B-Instruct,bfloat16,instruction-tuned,,1.71
33
+ Done,CohereForAI/aya-expanse-8b,float16,pretrained,"December 3, 2024",8.03
34
+ Done,Qwen/Qwen2.5-1.5B,bfloat16,pretrained,,1.54
35
+ Done,Qwen/Qwen2.5-1.5B-Instruct,bfloat16,instruction-tuned,,1.54
36
+ Done,Qwen/Qwen2.5-7B,bfloat16,pretrained,,7.62
37
+ Done,Qwen/Qwen2.5-7B-Instruct,bfloat16,instruction-tuned,,7.62
38
+ Done,google/gemma-2-2b,float32,pretrained,"September 12, 2024",2.61
39
+ Done,google/gemma-2-2b-it,bfloat16,instruction-tuned,"August 2, 2024",2.61
40
+ Done,google/gemma-2-9b,float32,pretrained,"September 12, 2024",9.24
41
+ Done,google/gemma-2-9b-it,bfloat16,instruction-tuned,"September 12, 2024",9.24
42
+ Not started,meta-llama/Llama-3.2-3B,bfloat16,pretrained,"November 8, 2024",3.21
43
+ Not started,meta-llama/Llama-3.2-3B-Instruct,bfloat16,instruction-tuned,"January 27, 2025",3.21
44
+ Not started,Qwen/Qwen2.5-3B,bfloat16,pretrained,,3.09
45
+ Not started,Qwen/Qwen2.5-3B-Instruct,bfloat16,instruction-tuned,,3.09
46
+ Done,microsoft/phi-1_5,float16,pretrained,"December 11, 2024",1.42
47
+ Not started,microsoft/Phi-3-small-128k-instruct,bfloat16,instruction-tuned,"December 11, 2024",7.39
48
+ Not started,microsoft/Phi-3.5-mini-instruct,bfloat16,instruction-tuned,"December 11, 2024",3.82
49
+ ,tiiuae/Falcon3-7B-Base,bfloat16,pretrained,,
50
+ In progress,projecte-aina/aguila-7b,float16,pretrained,"September 17, 2024",6.85
51
+ Done,projecte-aina/FLOR-1.3B,float32,pretrained,,1.31
52
+ Done,projecte-aina/FLOR-1.3B-Instructed,float16,instruction-tuned,,1.31
53
+ Done,01-ai/Yi-1.5-9B,bfloat16,pretrained,"December 11, 2024",8.83
54
+ Done,01-ai/Yi-1.5-9B-Chat,bfloat16,instruction-tuned,"December 11, 2024",8.83
55
+ Done,occiglot/occiglot-7b-eu5,bfloat16,pretrained,,7.24
56
+ Done,occiglot/occiglot-7b-eu5-instruct,bfloat16,instruction-tuned,,7.24
57
+ Not started,openGPT-X/Teuken-7B-instruct-commercial-v0.4,bfloat16,instruction-tuned,,7.45
58
+ Not started,openGPT-X/Teuken-7B-instruct-research-v0.4,bfloat16,instruction-tuned,,7.45
59
+ ,deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B,bfloat16,instruction-tuned,,1.78
60
+ ,deepseek-ai/DeepSeek-R1-Distill-Qwen-7B,bfloat16,instruction-tuned,,7.62
61
+ Test,Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8,float16,instruction-tuned,,4.99
62
+ Not started,Qwen/Qwen2.5-32B-Instruct-GPTQ-Int4,float16,instruction-tuned,,5.74
63
+ Not started,Qwen/Qwen2.5-72B-Instruct-GPTQ-Int4,float16,instruction-tuned,,11.9
64
+ Test,Qwen/Qwen3-14B-AWQ,float16,instruction-tuned,,3.32
65
+ Not started,Qwen/Qwen3-30B-A3B-GPTQ-Int4,float16,instruction-tuned,,4.67
66
+ Not started,Qwen/Qwen3-32B-AWQ,float16,instruction-tuned,,5.73
67
+ ,unsloth/DeepSeek-R1-Distill-Qwen-14B-bnb-4bit,bfloat16,instruction-tuned,,8.37
68
+ Test,google/gemma-3-1b-it,bfloat16,instruction-tuned,,1
69
+ Not started,google/gemma-3-1b-pt,bfloat16,pretrained,,1
70
+ Not started,google/gemma-3-4b-it,bfloat16,instruction-tuned,,4.3
71
+ Not started,google/gemma-3-4b-pt,bfloat16,pretrained,,4.3
72
+ Not started,Qwen/Qwen3-0.6B,bfloat16,instruction-tuned,,0.753
73
+ Not started,Qwen/Qwen3-0.6B-Base,bfloat16,instruction-tuned,,0.753
74
+ Test,Qwen/Qwen3-1.7B,bfloat16,instruction-tuned,,2.03
75
+ Not started,Qwen/Qwen3-1.7B-Base,bfloat16,instruction-tuned,,2.03
76
+ Not started,Qwen/Qwen3-4B,bfloat16,instruction-tuned,,4.02
77
+ Not started,Qwen/Qwen3-4B-Base,bfloat16,instruction-tuned,,4.02
78
+ Not started,Qwen/Qwen3-8B,bfloat16,instruction-tuned,,8.19
79
+ Not started,Qwen/Qwen3-8B-Base,bfloat16,instruction-tuned,,8.19
80
+ Done,bertin-project/bertin-gpt-j-6B,float32,pretrained,"December 11, 2024",6.06
81
+ ,Qwen/Qwen2-7B,bfloat16,pretrained,"December 11, 2024",7
82
+ In progress,tiiuae/falcon-7b,bfloat16,pretrained,,7
83
+ Not started,tiiuae/falcon-7b-instruct,bfloat16,instruction-tuned,"October 31, 2024",7
84
+ Not started,AIDC-AI/Marco-LLM-ES,bfloat16,pretrained,,7.62
85
+ ,Danielbrdz/Barcenas-27b,float32,pretrained,"December 11, 2024",27
86
+ ,ibm-granite/granite-3.0-8b-base,float32,pretrained,"October 24, 2024",8
87
+ ,ibm-granite/granite-3.0-8b-instruct,float32,instruction-tuned,"December 11, 2024",8
88
+ ,Iker/Llama-3-Instruct-Neurona-8b-v2,bfloat16,instruction-tuned,"December 11, 2024",8
89
+ ,internlm/internlm2_5-7b-chat,bfloat16,instruction-tuned,"December 11, 2024",7.74
90
+ In progress,orai-nlp/Llama-eus-8B,bfloat16,pretrained,"December 11, 2024",8
91
+ ,sandbox-ai/Llama-3.1-Tango-8b-f16,float16,fine-tuned,,
92
+ Not started,Almawave/Velvet-14B,bfloat16,pretrained,,14.1
93
+ Not started,google/gemma-3-12b-it,bfloat16,instruction-tuned,,12.2
94
+ Not started,google/gemma-3-12b-pt,bfloat16,pretrained,,12.2
95
+ Not started,HiTZ/latxa-13b-v1.2,bfloat16,pretrained,"December 3, 2024",13
96
+ ,microsoft/phi-4,bfloat16,instruction-tuned,,14.7
97
+ Not started,Qwen/Qwen3-14B,bfloat16,instruction-tuned,,14.8
98
+ Not started,Qwen/Qwen3-14B-Base,bfloat16,instruction-tuned,,14.8
99
+ ,speakleash/Bielik-11B-v2.3-Instruct,float16,RL-tuned,,11
100
+ ,tiiuae/Falcon3-10B-Base,bfloat16,pretrained,,
101
+ ,BSC-LT/ALIA-40b,bfloat16,pretrained,,40.4
102
+ Not started,CohereForAI/aya-expanse-32b,float16,instruction-tuned,,32.3
103
+ Not started,CohereForAI/c4ai-command-r-08-2024,float16,pretrained,,32.3
104
+ Not started,google/gemma-3-27b-it,bfloat16,instruction-tuned,,27.4
105
+ Not started,google/gemma-3-27b-pt,bfloat16,pretrained,,27.4
106
+ ,mistralai/Mistral-Small-24B-Base-2501,bfloat16,pretrained,,23.6
107
+ ,mistralai/Mistral-Small-24B-Instruct-2501,bfloat16,instruction-tuned,,23.6
108
+ Not started,Qwen/Qwen2.5-32B,bfloat16,pretrained,,32.8
109
+ In progress,Qwen/Qwen2.5-32B-Instruct,bfloat16,instruction-tuned,"October 31, 2024",32.8
110
+ Not started,Qwen/Qwen3-30B-A3B,bfloat16,instruction-tuned,,30.5
111
+ Not started,Qwen/Qwen3-30B-A3B-Base,bfloat16,pretrained,,30.5
112
+ Not started,Qwen/Qwen3-32B,bfloat16,instruction-tuned,,32.8
113
+ Not started,Qwen/Qwen3-32B-Base,bfloat16,instruction-tuned,,32.8
114
+ ,deepseek-ai/DeepSeek-R1-Distill-Llama-8B,bfloat16,instruction-tuned,,8.03
115
+ ,TheBloke/CodeLlama-70B-Instruct-AWQ,float16,instruction-tuned,,9.68
116
+ Not started,TheBloke/Llama-2-13B-chat-GPTQ,float16,instruction-tuned,,2.03
117
+ ,TheBloke/Llama-2-70B-GPTQ,float16,,,9.1
118
+ ,TheBloke/Mixtral-8x7B-v0.1-GPTQ,bfloat16,,,6.09
119
+ ,HiTZ/latxa-70b-v1.2,bfloat16,pretrained,"November 27, 2024",70
120
+ ,sandbox-ai/Llama-3.1-Tango-70b,float16,instruction-tuned,,70
tiiuae/Falcon3-7B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -1 +1 @@
1
- {"model": "tiiuae/Falcon3-7B-Base", "base_model": "", "revision": "bf3d7ed586cb22a921520e2d681a9d3d7642cde8", "precision": "bfloat16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "RUNNING", "submitted_time": "2025-01-13T06:47:33Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 21, "params": 7.456, "license": "other", "private": false, "sender": "rcojocaru"}
 
1
+ {"model": "tiiuae/Falcon3-7B-Base", "base_model": "", "revision": "bf3d7ed586cb22a921520e2d681a9d3d7642cde8", "precision": "bfloat16", "architecture": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-01-13T06:47:33Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 21, "params": 7.456, "license": "other", "private": false, "sender": "rcojocaru"}