bgamazay committed
Commit e7cbd6a · verified · 1 Parent(s): ef24be8

Update app.py

Files changed (1):
  1. app.py +201 -354
app.py CHANGED
@@ -1,253 +1,191 @@
- import gradio as gr
  import pandas as pd
- import os
- import zipfile
- import base64

- CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
- CITATION_BUTTON_TEXT = r"""@misc{aienergyscore-leaderboard,
- author = {Sasha Luccioni and Boris Gamazaychikov and Emma Strubell and Sara Hooker and Yacine Jernite and Carole-Jean Wu and Margaret Mitchell},
- title = {AI Energy Score Leaderboard - February 2025},
- year = {2025},
- publisher = {Hugging Face},
- howpublished = "\url{https://huggingface.co/spaces/AIEnergyScore/Leaderboard}",
- }"""

- # List of tasks (CSV filenames)
- tasks = [
- 'asr.csv',
- 'object_detection.csv',
- 'text_classification.csv',
- 'image_captioning.csv',
- 'question_answering.csv',
- 'text_generation.csv',
- 'image_classification.csv',
- 'sentence_similarity.csv',
- 'image_generation.csv',
- 'summarization.csv'
- ]

- def format_stars(score):
  try:
- score_int = int(score)
- except Exception:
- score_int = 0
- # Render stars in black with a slightly larger font.
- return f'<span style="color: black; font-size:1.5em;">{"★" * score_int}</span>'
-
- def make_link(mname):
- parts = str(mname).split('/')
- display_name = parts[1] if len(parts) > 1 else mname
- return f'<a href="https://huggingface.co/{mname}" target="_blank">{display_name}</a>'
-
- def extract_link_text(html_link):
- """Extracts the inner text from an HTML link."""
- start = html_link.find('>') + 1
- end = html_link.rfind('</a>')
- if start > 0 and end > start:
- return html_link[start:end]
  else:
- return html_link
-
- def generate_html_table_from_df(df):
- """
- Given a dataframe with a numeric energy column (gpu_energy_numeric),
- generate an HTML table with three columns:
- - Model (the link, with a fixed width based on the longest model name)
- - GPU Energy (Wh) plus a horizontal bar whose width is proportional
- to the energy value relative to the maximum in the table.
- - Score (displayed as stars)
- """
- # Compute a static width (in pixels) for the Model column based on the longest model name.
- if not df.empty:
- max_length = max(len(extract_link_text(link)) for link in df['Model'])
  else:
- max_length = 10
- # Multiply by an estimated average character width (10 pixels) and add some extra padding.
- static_width = max_length * 10 + 16
-
- max_energy = df['gpu_energy_numeric'].max() if not df.empty else 1
- color_map = {"1": "black", "2": "black", "3": "black", "4": "black", "5": "black"}
- html = '<table style="width:100%; border-collapse: collapse; font-family: Inter, sans-serif;">'
- # Keep only one header (the one with hover text)
- html += '<thead><tr style="background-color: #f2f2f2;">'
- html += '<th style="text-align: left; padding: 8px;" title="Model name with link to Hugging Face">Model</th>'
- html += '<th style="text-align: left; padding: 8px;" title="GPU energy consumed in Watt-hours for 1,000 queries">GPU Energy (Wh)</th>'
- html += '<th style="text-align: left; padding: 8px;" title="5 is most efficient, 1 is least. Relative energy efficiency score relative to other models in task/class at the time of leaderboard launch">Score</th>'
- html += '</tr></thead>'
- html += '<tbody>'
- for _, row in df.iterrows():
- energy_numeric = row['gpu_energy_numeric']
- energy_str = f"{energy_numeric:.2f}"
- # Compute the relative width (as a percentage)
- bar_width = (energy_numeric / max_energy) * 100
- score_val = row['energy_score']
- bar_color = color_map.get(str(score_val), "gray")
- html += '<tr>'
- html += f'<td style="padding: 8px; width: {static_width}px;">{row["Model"]}</td>'
- html += (
- f'<td style="padding: 8px;">{energy_str}<br>'
- f'<div style="background-color: {bar_color}; width: {bar_width:.1f}%; height: 10px;"></div></td>'
- )
- html += f'<td style="padding: 8px;">{row["Score"]}</td>'
- html += '</tr>'
- html += '</tbody></table>'
- return html
-
- # --- Function to zip all CSV files ---
- def zip_csv_files():
- data_dir = "data/energy"
- zip_filename = "data.zip"
- with zipfile.ZipFile(zip_filename, "w", zipfile.ZIP_DEFLATED) as zipf:
- for filename in os.listdir(data_dir):
- if filename.endswith(".csv"):
- filepath = os.path.join(data_dir, filename)
- zipf.write(filepath, arcname=filename)
- return zip_filename

  def get_zip_data_link():
- """Creates a data URI download link for the ZIP file."""
- zip_filename = zip_csv_files()
- with open(zip_filename, "rb") as f:
- data = f.read()
- b64 = base64.b64encode(data).decode()
- href = (
- f'<a href="data:application/zip;base64,{b64}" '
- 'download="data.zip" '
- 'style="margin: 0 15px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
- 'color: black; font-family: \'Inter\', sans-serif;">Download Data</a>'
  )
- return href
-
- # --- Modified functions to include a sort_order parameter ---
- def get_model_names_html(task, sort_order="Low to High"):
- df = pd.read_csv('data/energy/' + task)
- if df.columns[0].startswith("Unnamed:"):
- df = df.iloc[:, 1:]
- df['energy_score'] = df['energy_score'].astype(int)
- # Convert kWh to Wh:
- df['gpu_energy_numeric'] = pd.to_numeric(df['total_gpu_energy'], errors='raise') * 1000
- df['Model'] = df['model'].apply(make_link)
- df['Score'] = df['energy_score'].apply(format_stars)
- ascending = True if sort_order == "Low to High" else False
- df = df.sort_values(by='gpu_energy_numeric', ascending=ascending)
- return generate_html_table_from_df(df)
-
- def get_all_model_names_html(sort_order="Low to High"):
- all_df = pd.DataFrame()
- for task in tasks:
- df = pd.read_csv('data/energy/' + task)
- if df.columns[0].startswith("Unnamed:"):
- df = df.iloc[:, 1:]
- df['energy_score'] = df['energy_score'].astype(int)
- df['gpu_energy_numeric'] = pd.to_numeric(df['total_gpu_energy'], errors='raise') * 1000
- df['Model'] = df['model'].apply(make_link)
- df['Score'] = df['energy_score'].apply(format_stars)
- all_df = pd.concat([all_df, df], ignore_index=True)
- all_df = all_df.drop_duplicates(subset=['model'])
- ascending = True if sort_order == "Low to High" else False
- all_df = all_df.sort_values(by='gpu_energy_numeric', ascending=ascending)
- return generate_html_table_from_df(all_df)
-
- def get_text_generation_model_names_html(model_class, sort_order="Low to High"):
- df = pd.read_csv('data/energy/text_generation.csv')
- if df.columns[0].startswith("Unnamed:"):
- df = df.iloc[:, 1:]
- if 'class' in df.columns:
- df = df[df['class'] == model_class]
- df['energy_score'] = df['energy_score'].astype(int)
- df['gpu_energy_numeric'] = pd.to_numeric(df['total_gpu_energy'], errors='raise') * 1000
- df['Model'] = df['model'].apply(make_link)
- df['Score'] = df['energy_score'].apply(format_stars)
- ascending = True if sort_order == "Low to High" else False
- df = df.sort_values(by='gpu_energy_numeric', ascending=ascending)
- return generate_html_table_from_df(df)
-
- # --- Update functions for dropdown changes ---
-
- # For Text Generation, two dropdowns: model class and sort order.
- def update_text_generation(selected_display, sort_order):
- mapping = {
- "A (Single Consumer GPU) <20B parameters": "A",
- "B (Single Cloud GPU) 20-66B parameters": "B",
- "C (Multiple Cloud GPUs) >66B parameters": "C"
- }
- model_class = mapping.get(selected_display, "A")
- return get_text_generation_model_names_html(model_class, sort_order)
-
- # For the other tabs, each update function simply takes the sort_order.
- def update_image_generation(sort_order):
- return get_model_names_html('image_generation.csv', sort_order)
-
- def update_text_classification(sort_order):
- return get_model_names_html('text_classification.csv', sort_order)
-
- def update_image_classification(sort_order):
- return get_model_names_html('image_classification.csv', sort_order)
-
- def update_image_captioning(sort_order):
- return get_model_names_html('image_captioning.csv', sort_order)
-
- def update_summarization(sort_order):
- return get_model_names_html('summarization.csv', sort_order)
-
- def update_asr(sort_order):
- return get_model_names_html('asr.csv', sort_order)
-
- def update_object_detection(sort_order):
- return get_model_names_html('object_detection.csv', sort_order)

- def update_sentence_similarity(sort_order):
- return get_model_names_html('sentence_similarity.csv', sort_order)
-
- def update_extractive_qa(sort_order):
- return get_model_names_html('question_answering.csv', sort_order)
-
- def update_all_tasks(sort_order):
- return get_all_model_names_html(sort_order)
-
- # --- Build the Gradio Interface ---
-
- demo = gr.Blocks(css="""
- .gr-dataframe table {
- table-layout: fixed;
- width: 100%;
- }
- .gr-dataframe th, .gr-dataframe td {
- max-width: 150px;
- white-space: nowrap;
- overflow: hidden;
- text-overflow: ellipsis;
- }
- """)

- with demo:
  # --- Header Links (at the very top) ---
- with gr.Row():
  submission_link = gr.HTML(
  '<a href="https://huggingface.co/spaces/AIEnergyScore/submission_portal" '
- 'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Submission Portal</a>'
  )
  label_link = gr.HTML(
  '<a href="https://huggingface.co/spaces/AIEnergyScore/Label" '
- 'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Label Generator</a>'
  )
  faq_link = gr.HTML(
  '<a href="https://huggingface.github.io/AIEnergyScore/#faq" '
- 'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">FAQ</a>'
  )
  documentation_link = gr.HTML(
  '<a href="https://huggingface.github.io/AIEnergyScore/#documentation" '
- 'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Documentation</a>'
  )
  download_link = gr.HTML(get_zip_data_link())
  community_link = gr.HTML(
  '<a href="https://huggingface.co/spaces/AIEnergyScore/README/discussions" '
- 'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Community</a>'
  )
@@ -256,147 +194,56 @@ with demo:
  <div style="margin-top: 0px;">
  <img src="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logo.png"
  alt="Logo"
- style="display: block; margin: 0 auto; max-width: 500px; height: auto;">
  </div>
  ''')

-
  # --- Subtitle (centered) ---
- gr.Markdown('<p style="text-align: center;">Welcome to the <a href="https://huggingface.co/AIEnergyScore">AI Energy Score</a> Leaderboard. Select the task to see scored model results.</p>')
-
- # --- Tabs for the different tasks ---
- with gr.Tabs():
- # --- Text Generation Tab ---
- with gr.TabItem("Text Generation 💬"):
- with gr.Row():
- model_class_options = [
- "A (Single Consumer GPU) <20B parameters",
- "B (Single Cloud GPU) 20-66B parameters",
- "C (Multiple Cloud GPUs) >66B parameters"
- ]
- model_class_dropdown = gr.Dropdown(
- choices=model_class_options,
- label="Select Model Class",
- value=model_class_options[0]
- )
- sort_dropdown_tg = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
  )
- tg_table = gr.HTML(get_text_generation_model_names_html("A", "Low to High"))
- # When either dropdown changes, update the table.
- model_class_dropdown.change(fn=update_text_generation, inputs=[model_class_dropdown, sort_dropdown_tg], outputs=tg_table)
- sort_dropdown_tg.change(fn=update_text_generation, inputs=[model_class_dropdown, sort_dropdown_tg], outputs=tg_table)

- # --- Image Generation Tab ---
- with gr.TabItem("Image Generation 📷"):
- sort_dropdown_img = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- img_table = gr.HTML(get_model_names_html('image_generation.csv', "Low to High"))
- sort_dropdown_img.change(fn=update_image_generation, inputs=sort_dropdown_img, outputs=img_table)
-
- # --- Text Classification Tab ---
- with gr.TabItem("Text Classification 🎭"):
- sort_dropdown_tc = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- tc_table = gr.HTML(get_model_names_html('text_classification.csv', "Low to High"))
- sort_dropdown_tc.change(fn=update_text_classification, inputs=sort_dropdown_tc, outputs=tc_table)
-
- # --- Image Classification Tab ---
- with gr.TabItem("Image Classification 🖼️"):
- sort_dropdown_ic = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- ic_table = gr.HTML(get_model_names_html('image_classification.csv', "Low to High"))
- sort_dropdown_ic.change(fn=update_image_classification, inputs=sort_dropdown_ic, outputs=ic_table)
-
- # --- Image Captioning Tab ---
- with gr.TabItem("Image Captioning 📝"):
- sort_dropdown_icap = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- icap_table = gr.HTML(get_model_names_html('image_captioning.csv', "Low to High"))
- sort_dropdown_icap.change(fn=update_image_captioning, inputs=sort_dropdown_icap, outputs=icap_table)
-
- # --- Summarization Tab ---
- with gr.TabItem("Summarization 📃"):
- sort_dropdown_sum = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- sum_table = gr.HTML(get_model_names_html('summarization.csv', "Low to High"))
- sort_dropdown_sum.change(fn=update_summarization, inputs=sort_dropdown_sum, outputs=sum_table)
-
- # --- Automatic Speech Recognition Tab ---
- with gr.TabItem("Automatic Speech Recognition 💬"):
- sort_dropdown_asr = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- asr_table = gr.HTML(get_model_names_html('asr.csv', "Low to High"))
- sort_dropdown_asr.change(fn=update_asr, inputs=sort_dropdown_asr, outputs=asr_table)
-
- # --- Object Detection Tab ---
- with gr.TabItem("Object Detection 🚘"):
- sort_dropdown_od = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- od_table = gr.HTML(get_model_names_html('object_detection.csv', "Low to High"))
- sort_dropdown_od.change(fn=update_object_detection, inputs=sort_dropdown_od, outputs=od_table)
-
- # --- Sentence Similarity Tab ---
- with gr.TabItem("Sentence Similarity 📚"):
- sort_dropdown_ss = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- ss_table = gr.HTML(get_model_names_html('sentence_similarity.csv', "Low to High"))
- sort_dropdown_ss.change(fn=update_sentence_similarity, inputs=sort_dropdown_ss, outputs=ss_table)

- # --- Extractive QA Tab ---
- with gr.TabItem("Extractive QA "):
- sort_dropdown_qa = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- qa_table = gr.HTML(get_model_names_html('question_answering.csv', "Low to High"))
- sort_dropdown_qa.change(fn=update_extractive_qa, inputs=sort_dropdown_qa, outputs=qa_table)

- # --- All Tasks Tab ---
- with gr.TabItem("All Tasks 💡"):
- sort_dropdown_all = gr.Dropdown(
- choices=["Low to High", "High to Low"],
- label="Sort",
- value="Low to High"
- )
- all_table = gr.HTML(get_all_model_names_html("Low to High"))
- sort_dropdown_all.change(fn=update_all_tasks, inputs=sort_dropdown_all, outputs=all_table)
-
- with gr.Accordion("📙 Citation", open=False):
- citation_button = gr.Textbox(
- value=CITATION_BUTTON_TEXT,
- label=CITATION_BUTTON_LABEL,
- elem_id="citation-button",
- lines=10,
- show_copy_button=True,
- )
- gr.Markdown("""Last updated: February 2025""")
-
- demo.launch()
+ import os, glob
+ import json
+ from datetime import datetime, timezone
+ from dataclasses import dataclass
+ from datasets import load_dataset, Dataset
  import pandas as pd
+ import gradio as gr
+ from huggingface_hub import HfApi, snapshot_download, ModelInfo, list_models
+ from enum import Enum
+
+ OWNER = "AIEnergyScore"
+ COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
+ TOKEN = os.environ.get("DEBUG")
+ API = HfApi(token=TOKEN)
+
+ task_mappings = {
+ 'automatic speech recognition': 'automatic-speech-recognition',
+ 'Object Detection': 'object-detection',
+ 'Text Classification': 'text-classification',
+ 'Image to Text': 'image-to-text',
+ 'Question Answering': 'question-answering',
+ 'Text Generation': 'text-generation',
+ 'Image Classification': 'image-classification',
+ 'Sentence Similarity': 'sentence-similarity',
+ 'Image Generation': 'image-generation',
+ 'Summarization': 'summarization'
+ }

+ @dataclass
+ class ModelDetails:
+ name: str
+ display_name: str = ""
+ symbol: str = "" # emoji

+ def start_compute_space():
+ API.restart_space(COMPUTE_SPACE)
+ gr.Info(f"Okay! {COMPUTE_SPACE} should be running now!")

+ def get_model_size(model_info: ModelInfo):
+ """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
  try:
+ model_size = round(model_info.safetensors["total"] / 1e9, 3)
+ except (AttributeError, TypeError):
+ return 0 # Unknown model sizes are indicated as 0
+ return model_size
+
+ def add_docker_eval(zip_file):
+ new_fid_list = zip_file.split("/")
+ new_fid = new_fid_list[-1]
+ if new_fid.endswith('.zip'):
+ API.upload_file(
+ path_or_fileobj=zip_file,
+ repo_id="AIEnergyScore/tested_proprietary_models",
+ path_in_repo='submitted_models/' + new_fid,
+ repo_type="dataset",
+ commit_message="Adding logs via submission Space.",
+ token=TOKEN
+ )
+ gr.Info('Uploaded logs to dataset! We will validate their validity and add them to the next version of the leaderboard.')
  else:
+ gr.Info('You can only upload .zip files here!')
+
+ def add_new_eval(repo_id: str, task: str):
+ model_owner = repo_id.split("/")[0]
+ model_name = repo_id.split("/")[1]
+ current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+ requests_dset = requests.to_pandas()
+ model_list = requests_dset[requests_dset['status'] == 'COMPLETED']['model'].tolist()
+ task_models = list(API.list_models(filter=task_mappings[task]))
+ task_model_names = [m.id for m in task_models]
+ if repo_id in model_list:
+ gr.Info('This model has already been run!')
+ elif repo_id not in task_model_names:
+ gr.Info("This model isn't compatible with the chosen task! Pick a different model-task combination")
  else:
+ try:
+ model_info = API.model_info(repo_id=repo_id)
+ model_size = get_model_size(model_info=model_info)
+ likes = model_info.likes
+ except Exception:
+ gr.Info("Could not find information for model %s" % (model_name))
+ model_size = None
+ likes = None
+
+ gr.Info("Adding request")
+ request_dict = {
+ "model": repo_id,
+ "status": "PENDING",
+ "submitted_time": pd.to_datetime(current_time),
+ "task": task_mappings[task],
+ "likes": likes,
+ "params": model_size,
+ "leaderboard_version": "v0",
+ }
+ print("Writing out request file to dataset")
+ df_request_dict = pd.DataFrame([request_dict])
+ print(df_request_dict)
+ df_final = pd.concat([requests_dset, df_request_dict], ignore_index=True)
+ updated_dset = Dataset.from_pandas(df_final)
+ updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+ gr.Info("Starting compute space at %s " % COMPUTE_SPACE)
+ return start_compute_space()
+
+ def print_existing_models():
+ requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+ requests_dset = requests.to_pandas()
+ model_df = requests_dset[['model', 'status']]
+ model_df = model_df[model_df['status'] == 'COMPLETED']
+ return model_df
+
+ def highlight_cols(x):
+ df = x.copy()
+ df[df['status'] == 'COMPLETED'] = 'color: green'
+ df[df['status'] == 'PENDING'] = 'color: orange'
+ df[df['status'] == 'FAILED'] = 'color: red'
+ return df
+
+ # Applying the style function for the table
+ existing_models = print_existing_models()
+ formatted_df = existing_models.style.apply(highlight_cols, axis=None)
+
+ def get_leaderboard_models():
+ path = r'leaderboard_v0_data/energy'
+ filenames = glob.glob(path + "/*.csv")
+ data = []
+ for filename in filenames:
+ data.append(pd.read_csv(filename))
+ leaderboard_data = pd.concat(data, ignore_index=True)
+ return leaderboard_data[['model', 'task']]

  def get_zip_data_link():
+ return (
+ '<a href="https://example.com/download.zip" '
+ 'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
+ 'color: black; font-family: \'Inter\', sans-serif;">Download Logs</a>'
  )

+ with gr.Blocks() as demo:
+ # --- Custom CSS for layout and styling ---
+ gr.HTML('''
+ <style>
+ /* Evenly space the header links */
+ .header-links {
+ display: flex;
+ justify-content: space-evenly;
+ align-items: center;
+ margin: 10px 0;
+ }
+ /* Center the subtitle text */
+ .centered-subtitle {
+ text-align: center;
+ font-size: 1.2em;
+ margin-bottom: 20px;
+ }
+ /* Full width container for matching widget edges */
+ .full-width {
+ width: 100% !important;
+ }
+ </style>
+ ''')

  # --- Header Links (at the very top) ---
+ with gr.Row(elem_classes="header-links"):
  submission_link = gr.HTML(
  '<a href="https://huggingface.co/spaces/AIEnergyScore/submission_portal" '
+ 'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Submission Portal</a>'
  )
  label_link = gr.HTML(
  '<a href="https://huggingface.co/spaces/AIEnergyScore/Label" '
+ 'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Label Generator</a>'
  )
  faq_link = gr.HTML(
  '<a href="https://huggingface.github.io/AIEnergyScore/#faq" '
+ 'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">FAQ</a>'
  )
  documentation_link = gr.HTML(
  '<a href="https://huggingface.github.io/AIEnergyScore/#documentation" '
+ 'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Documentation</a>'
  )
  download_link = gr.HTML(get_zip_data_link())
  community_link = gr.HTML(
  '<a href="https://huggingface.co/spaces/AIEnergyScore/README/discussions" '
+ 'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
  'color: black; font-family: \'Inter\', sans-serif;">Community</a>'
  )

  <div style="margin-top: 0px;">
  <img src="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logo.png"
  alt="Logo"
+ style="display: block; margin: 0 auto; max-width: 400px; height: auto;">
  </div>
  ''')

  # --- Subtitle (centered) ---
+ gr.Markdown('<p class="centered-subtitle">Welcome to the AI Energy Score Leaderboard. Select the task to see scored model results.</p>')
+
+ # --- Main UI Container (ensuring matching edges) ---
+ with gr.Column(elem_classes="full-width"):
+ with gr.Row():
+ with gr.Column():
+ task = gr.Dropdown(
+ choices=list(task_mappings.keys()),
+ label="Choose a benchmark task",
+ value='Text Generation',
+ multiselect=False,
+ interactive=True,
  )
+ with gr.Column():
+ model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")

+ with gr.Row():
+ with gr.Column():
+ submit_button = gr.Button("Submit for Analysis")
+ submission_result = gr.Markdown()
+ submit_button.click(
+ fn=add_new_eval,
+ inputs=[model_name_textbox, task],
+ outputs=submission_result,
+ )

+ # --- Docker Log Submission (Simplified) ---
+ with gr.Accordion("Submit log files from a Docker run:", open=False):
+ gr.Markdown("""
+ **⚠️ Warning: By uploading the zip file, you confirm that you have read and agree to the following terms:**
+
+ - **Public Data Sharing:** You consent to the public sharing of the energy performance data derived from your submission. No additional information related to this model, including proprietary configurations, will be disclosed.
+ - **Data Integrity:** You certify that the log files submitted are accurate, unaltered, and generated directly from testing your model as per the specified benchmarking procedures.
+ - **Model Representation:** You affirm that the model tested and submitted is representative of the production-level version, including its level of quantization and any other relevant characteristics impacting energy efficiency and performance.
+ """)
+ file_output = gr.File(visible=False)
+ u = gr.UploadButton("Upload a zip file with logs", file_count="single", interactive=True)
+ u.upload(add_docker_eval, u, file_output)

+ # --- Leaderboard and Recent Models Accordions ---
+ with gr.Row():
+ with gr.Column():
+ with gr.Accordion("Models that are in the latest leaderboard version:", open=False, visible=False):
+ gr.Dataframe(get_leaderboard_models(), elem_classes="full-width")
+ with gr.Accordion("Models that have been benchmarked recently:", open=False, visible=False):
+ gr.Dataframe(formatted_df, elem_classes="full-width")
+
+ demo.launch()