Update app.py

app.py CHANGED
@@ -1,253 +1,191 @@
-import …
 import pandas as pd
-import …
-import …
-import …

…
-    publisher = {Hugging Face},
-    howpublished = "\url{https://huggingface.co/spaces/AIEnergyScore/Leaderboard}",
-}"""

…
-    'object_detection.csv',
-    'text_classification.csv',
-    'image_captioning.csv',
-    'question_answering.csv',
-    'text_generation.csv',
-    'image_classification.csv',
-    'sentence_similarity.csv',
-    'image_generation.csv',
-    'summarization.csv'
-]

-def …
    try:
…
-    except …
…
    else:
…
-def …
…
    else:
…

def get_zip_data_link():
…
-    b64 = base64.b64encode(data).decode()
-    href = (
-        f'<a href="data:application/zip;base64,{b64}" '
-        'download="data.zip" '
-        'style="margin: 0 15px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
-        'color: black; font-family: \'Inter\', sans-serif;">Download Data</a>'
    )
-    return href
-
-# --- Modified functions to include a sort_order parameter ---
-def get_model_names_html(task, sort_order="Low to High"):
-    df = pd.read_csv('data/energy/' + task)
-    if df.columns[0].startswith("Unnamed:"):
-        df = df.iloc[:, 1:]
-    df['energy_score'] = df['energy_score'].astype(int)
-    # Convert kWh to Wh:
-    df['gpu_energy_numeric'] = pd.to_numeric(df['total_gpu_energy'], errors='raise') * 1000
-    df['Model'] = df['model'].apply(make_link)
-    df['Score'] = df['energy_score'].apply(format_stars)
-    ascending = True if sort_order == "Low to High" else False
-    df = df.sort_values(by='gpu_energy_numeric', ascending=ascending)
-    return generate_html_table_from_df(df)
-
-def get_all_model_names_html(sort_order="Low to High"):
-    all_df = pd.DataFrame()
-    for task in tasks:
-        df = pd.read_csv('data/energy/' + task)
-        if df.columns[0].startswith("Unnamed:"):
-            df = df.iloc[:, 1:]
-        df['energy_score'] = df['energy_score'].astype(int)
-        df['gpu_energy_numeric'] = pd.to_numeric(df['total_gpu_energy'], errors='raise') * 1000
-        df['Model'] = df['model'].apply(make_link)
-        df['Score'] = df['energy_score'].apply(format_stars)
-        all_df = pd.concat([all_df, df], ignore_index=True)
-    all_df = all_df.drop_duplicates(subset=['model'])
-    ascending = True if sort_order == "Low to High" else False
-    all_df = all_df.sort_values(by='gpu_energy_numeric', ascending=ascending)
-    return generate_html_table_from_df(all_df)
-
-def get_text_generation_model_names_html(model_class, sort_order="Low to High"):
-    df = pd.read_csv('data/energy/text_generation.csv')
-    if df.columns[0].startswith("Unnamed:"):
-        df = df.iloc[:, 1:]
-    if 'class' in df.columns:
-        df = df[df['class'] == model_class]
-    df['energy_score'] = df['energy_score'].astype(int)
-    df['gpu_energy_numeric'] = pd.to_numeric(df['total_gpu_energy'], errors='raise') * 1000
-    df['Model'] = df['model'].apply(make_link)
-    df['Score'] = df['energy_score'].apply(format_stars)
-    ascending = True if sort_order == "Low to High" else False
-    df = df.sort_values(by='gpu_energy_numeric', ascending=ascending)
-    return generate_html_table_from_df(df)
-
-# --- Update functions for dropdown changes ---
-
-# For Text Generation, two dropdowns: model class and sort order.
-def update_text_generation(selected_display, sort_order):
-    mapping = {
-        "A (Single Consumer GPU) <20B parameters": "A",
-        "B (Single Cloud GPU) 20-66B parameters": "B",
-        "C (Multiple Cloud GPUs) >66B parameters": "C"
-    }
-    model_class = mapping.get(selected_display, "A")
-    return get_text_generation_model_names_html(model_class, sort_order)
-
-# For the other tabs, each update function simply takes the sort_order.
-def update_image_generation(sort_order):
-    return get_model_names_html('image_generation.csv', sort_order)
-
-def update_text_classification(sort_order):
-    return get_model_names_html('text_classification.csv', sort_order)
-
-def update_image_classification(sort_order):
-    return get_model_names_html('image_classification.csv', sort_order)
-
-def update_image_captioning(sort_order):
-    return get_model_names_html('image_captioning.csv', sort_order)
-
-def update_summarization(sort_order):
-    return get_model_names_html('summarization.csv', sort_order)
-
-def update_asr(sort_order):
-    return get_model_names_html('asr.csv', sort_order)
-
-def update_object_detection(sort_order):
-    return get_model_names_html('object_detection.csv', sort_order)

…

-with demo:
    # --- Header Links (at the very top) ---
-    with gr.Row():
        submission_link = gr.HTML(
            '<a href="https://huggingface.co/spaces/AIEnergyScore/submission_portal" '
-            'style="…
            'color: black; font-family: \'Inter\', sans-serif;">Submission Portal</a>'
        )
        label_link = gr.HTML(
            '<a href="https://huggingface.co/spaces/AIEnergyScore/Label" '
-            'style="…
            'color: black; font-family: \'Inter\', sans-serif;">Label Generator</a>'
        )
        faq_link = gr.HTML(
            '<a href="https://huggingface.github.io/AIEnergyScore/#faq" '
-            'style="…
            'color: black; font-family: \'Inter\', sans-serif;">FAQ</a>'
        )
        documentation_link = gr.HTML(
            '<a href="https://huggingface.github.io/AIEnergyScore/#documentation" '
-            'style="…
            'color: black; font-family: \'Inter\', sans-serif;">Documentation</a>'
        )
        download_link = gr.HTML(get_zip_data_link())
        community_link = gr.HTML(
            '<a href="https://huggingface.co/spaces/AIEnergyScore/README/discussions" '
-            'style="…
            'color: black; font-family: \'Inter\', sans-serif;">Community</a>'
        )

@@ -256,147 +194,56 @@ with demo:
    <div style="margin-top: 0px;">
        <img src="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logo.png"
             alt="Logo"
-             style="display: block; margin: 0 auto; max-width: …
    </div>
    ''')

…
    # --- Subtitle (centered) ---
-    gr.Markdown('<p …
…
-    # --- …
-    with gr. …
…
-            model_class_dropdown = gr.Dropdown(
-                choices=model_class_options,
-                label="Select Model Class",
-                value=model_class_options[0]
-            )
-            sort_dropdown_tg = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
            )
…
-            model_class_dropdown.change(fn=update_text_generation, inputs=[model_class_dropdown, sort_dropdown_tg], outputs=tg_table)
-            sort_dropdown_tg.change(fn=update_text_generation, inputs=[model_class_dropdown, sort_dropdown_tg], outputs=tg_table)

…
-        # --- Text Classification Tab ---
-        with gr.TabItem("Text Classification 🎭"):
-            sort_dropdown_tc = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
-            )
-            tc_table = gr.HTML(get_model_names_html('text_classification.csv', "Low to High"))
-            sort_dropdown_tc.change(fn=update_text_classification, inputs=sort_dropdown_tc, outputs=tc_table)
-
-        # --- Image Classification Tab ---
-        with gr.TabItem("Image Classification 🖼️"):
-            sort_dropdown_ic = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
-            )
-            ic_table = gr.HTML(get_model_names_html('image_classification.csv', "Low to High"))
-            sort_dropdown_ic.change(fn=update_image_classification, inputs=sort_dropdown_ic, outputs=ic_table)
-
-        # --- Image Captioning Tab ---
-        with gr.TabItem("Image Captioning 📝"):
-            sort_dropdown_icap = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
-            )
-            icap_table = gr.HTML(get_model_names_html('image_captioning.csv', "Low to High"))
-            sort_dropdown_icap.change(fn=update_image_captioning, inputs=sort_dropdown_icap, outputs=icap_table)
-
-        # --- Summarization Tab ---
-        with gr.TabItem("Summarization 📃"):
-            sort_dropdown_sum = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
-            )
-            sum_table = gr.HTML(get_model_names_html('summarization.csv', "Low to High"))
-            sort_dropdown_sum.change(fn=update_summarization, inputs=sort_dropdown_sum, outputs=sum_table)
-
-        # --- Automatic Speech Recognition Tab ---
-        with gr.TabItem("Automatic Speech Recognition 💬"):
-            sort_dropdown_asr = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
-            )
-            asr_table = gr.HTML(get_model_names_html('asr.csv', "Low to High"))
-            sort_dropdown_asr.change(fn=update_asr, inputs=sort_dropdown_asr, outputs=asr_table)
-
-        # --- Object Detection Tab ---
-        with gr.TabItem("Object Detection 🚘"):
-            sort_dropdown_od = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
-            )
-            od_table = gr.HTML(get_model_names_html('object_detection.csv', "Low to High"))
-            sort_dropdown_od.change(fn=update_object_detection, inputs=sort_dropdown_od, outputs=od_table)
-
-        # --- Sentence Similarity Tab ---
-        with gr.TabItem("Sentence Similarity 📚"):
-            sort_dropdown_ss = gr.Dropdown(
-                choices=["Low to High", "High to Low"],
-                label="Sort",
-                value="Low to High"
-            )
-            ss_table = gr.HTML(get_model_names_html('sentence_similarity.csv', "Low to High"))
-            sort_dropdown_ss.change(fn=update_sentence_similarity, inputs=sort_dropdown_ss, outputs=ss_table)

-        # --- …
-        with gr. …
…

-    # --- …
-    with gr. …
…
-    with gr.Accordion("📙 Citation", open=False):
-        citation_button = gr.Textbox(
-            value=CITATION_BUTTON_TEXT,
-            label=CITATION_BUTTON_LABEL,
-            elem_id="citation-button",
-            lines=10,
-            show_copy_button=True,
-        )
-    gr.Markdown("""Last updated: February 2025""")
-
-demo.launch()
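The removed `get_zip_data_link` shown above (partly truncated in this rendering) followed the usual data-URI download pattern: read the zip bytes, base64-encode them, and embed the payload in an `<a download=...>` element. A minimal sketch of that pattern, assuming a hypothetical `data.zip` path because the original file-reading lines are not recoverable from this view:

import base64

def get_zip_data_link(zip_path="data.zip"):  # zip_path is a hypothetical stand-in for the original path
    # Read the archive and embed it directly in the link as a base64 data URI.
    with open(zip_path, "rb") as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    href = (
        f'<a href="data:application/zip;base64,{b64}" '
        'download="data.zip" '
        'style="margin: 0 15px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
        'color: black; font-family: \'Inter\', sans-serif;">Download Data</a>'
    )
    return href

The rewritten version of the same function in the new file (below) drops the inline base64 payload and simply returns a static link to a hosted zip.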
+import os, glob
+import json
+from datetime import datetime, timezone
+from dataclasses import dataclass
+from datasets import load_dataset, Dataset
 import pandas as pd
+import gradio as gr
+from huggingface_hub import HfApi, snapshot_download, ModelInfo, list_models
+from enum import Enum
+
+OWNER = "AIEnergyScore"
+COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
+TOKEN = os.environ.get("DEBUG")
+API = HfApi(token=TOKEN)
+
+task_mappings = {
+    'automatic speech recognition': 'automatic-speech-recognition',
+    'Object Detection': 'object-detection',
+    'Text Classification': 'text-classification',
+    'Image to Text': 'image-to-text',
+    'Question Answering': 'question-answering',
+    'Text Generation': 'text-generation',
+    'Image Classification': 'image-classification',
+    'Sentence Similarity': 'sentence-similarity',
+    'Image Generation': 'image-generation',
+    'Summarization': 'summarization'
+}

+@dataclass
+class ModelDetails:
+    name: str
+    display_name: str = ""
+    symbol: str = ""  # emoji

+def start_compute_space():
+    API.restart_space(COMPUTE_SPACE)
+    gr.Info(f"Okay! {COMPUTE_SPACE} should be running now!")

+def get_model_size(model_info: ModelInfo):
+    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
    try:
+        model_size = round(model_info.safetensors["total"] / 1e9, 3)
+    except (AttributeError, TypeError):
+        return 0  # Unknown model sizes are indicated as 0
+    return model_size
+
+def add_docker_eval(zip_file):
+    new_fid_list = zip_file.split("/")
+    new_fid = new_fid_list[-1]
+    if new_fid.endswith('.zip'):
+        API.upload_file(
+            path_or_fileobj=zip_file,
+            repo_id="AIEnergyScore/tested_proprietary_models",
+            path_in_repo='submitted_models/' + new_fid,
+            repo_type="dataset",
+            commit_message="Adding logs via submission Space.",
+            token=TOKEN
+        )
+        gr.Info('Uploaded logs to dataset! We will validate their validity and add them to the next version of the leaderboard.')
    else:
+        gr.Info('You can only upload .zip files here!')
+
+def add_new_eval(repo_id: str, task: str):
+    model_owner = repo_id.split("/")[0]
+    model_name = repo_id.split("/")[1]
+    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+    requests_dset = requests.to_pandas()
+    model_list = requests_dset[requests_dset['status'] == 'COMPLETED']['model'].tolist()
+    task_models = list(API.list_models(filter=task_mappings[task]))
+    task_model_names = [m.id for m in task_models]
+    if repo_id in model_list:
+        gr.Info('This model has already been run!')
+    elif repo_id not in task_model_names:
+        gr.Info("This model isn't compatible with the chosen task! Pick a different model-task combination")
    else:
+        try:
+            model_info = API.model_info(repo_id=repo_id)
+            model_size = get_model_size(model_info=model_info)
+            likes = model_info.likes
+        except Exception:
+            gr.Info("Could not find information for model %s" % (model_name))
+            model_size = None
+            likes = None
+
+        gr.Info("Adding request")
+        request_dict = {
+            "model": repo_id,
+            "status": "PENDING",
+            "submitted_time": pd.to_datetime(current_time),
+            "task": task_mappings[task],
+            "likes": likes,
+            "params": model_size,
+            "leaderboard_version": "v0",
+        }
+        print("Writing out request file to dataset")
+        df_request_dict = pd.DataFrame([request_dict])
+        print(df_request_dict)
+        df_final = pd.concat([requests_dset, df_request_dict], ignore_index=True)
+        updated_dset = Dataset.from_pandas(df_final)
+        updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+        gr.Info("Starting compute space at %s " % COMPUTE_SPACE)
+        return start_compute_space()
+
+def print_existing_models():
+    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+    requests_dset = requests.to_pandas()
+    model_df = requests_dset[['model', 'status']]
+    model_df = model_df[model_df['status'] == 'COMPLETED']
+    return model_df
+
+def highlight_cols(x):
+    df = x.copy()
+    df[df['status'] == 'COMPLETED'] = 'color: green'
+    df[df['status'] == 'PENDING'] = 'color: orange'
+    df[df['status'] == 'FAILED'] = 'color: red'
+    return df
+
+# Applying the style function for the table
+existing_models = print_existing_models()
+formatted_df = existing_models.style.apply(highlight_cols, axis=None)
+
+def get_leaderboard_models():
+    path = r'leaderboard_v0_data/energy'
+    filenames = glob.glob(path + "/*.csv")
+    data = []
+    for filename in filenames:
+        data.append(pd.read_csv(filename))
+    leaderboard_data = pd.concat(data, ignore_index=True)
+    return leaderboard_data[['model', 'task']]

def get_zip_data_link():
+    return (
+        '<a href="https://example.com/download.zip" '
+        'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
+        'color: black; font-family: \'Inter\', sans-serif;">Download Logs</a>'
    )

+with gr.Blocks() as demo:
+    # --- Custom CSS for layout and styling ---
+    gr.HTML('''
+    <style>
+    /* Evenly space the header links */
+    .header-links {
+        display: flex;
+        justify-content: space-evenly;
+        align-items: center;
+        margin: 10px 0;
+    }
+    /* Center the subtitle text */
+    .centered-subtitle {
+        text-align: center;
+        font-size: 1.2em;
+        margin-bottom: 20px;
+    }
+    /* Full width container for matching widget edges */
+    .full-width {
+        width: 100% !important;
+    }
+    </style>
+    ''')

    # --- Header Links (at the very top) ---
+    with gr.Row(elem_classes="header-links"):
        submission_link = gr.HTML(
            '<a href="https://huggingface.co/spaces/AIEnergyScore/submission_portal" '
+            'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
            'color: black; font-family: \'Inter\', sans-serif;">Submission Portal</a>'
        )
        label_link = gr.HTML(
            '<a href="https://huggingface.co/spaces/AIEnergyScore/Label" '
+            'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
            'color: black; font-family: \'Inter\', sans-serif;">Label Generator</a>'
        )
        faq_link = gr.HTML(
            '<a href="https://huggingface.github.io/AIEnergyScore/#faq" '
+            'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
            'color: black; font-family: \'Inter\', sans-serif;">FAQ</a>'
        )
        documentation_link = gr.HTML(
            '<a href="https://huggingface.github.io/AIEnergyScore/#documentation" '
+            'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
            'color: black; font-family: \'Inter\', sans-serif;">Documentation</a>'
        )
        download_link = gr.HTML(get_zip_data_link())
        community_link = gr.HTML(
            '<a href="https://huggingface.co/spaces/AIEnergyScore/README/discussions" '
+            'style="text-decoration: none; font-weight: bold; font-size: 1.1em; '
            'color: black; font-family: \'Inter\', sans-serif;">Community</a>'
        )

…
    <div style="margin-top: 0px;">
        <img src="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logo.png"
             alt="Logo"
+             style="display: block; margin: 0 auto; max-width: 400px; height: auto;">
    </div>
    ''')

    # --- Subtitle (centered) ---
+    gr.Markdown('<p class="centered-subtitle">Welcome to the AI Energy Score Leaderboard. Select the task to see scored model results.</p>')
+
+    # --- Main UI Container (ensuring matching edges) ---
+    with gr.Column(elem_classes="full-width"):
+        with gr.Row():
+            with gr.Column():
+                task = gr.Dropdown(
+                    choices=list(task_mappings.keys()),
+                    label="Choose a benchmark task",
+                    value='Text Generation',
+                    multiselect=False,
+                    interactive=True,
                )
+            with gr.Column():
+                model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")

+        with gr.Row():
+            with gr.Column():
+                submit_button = gr.Button("Submit for Analysis")
+                submission_result = gr.Markdown()
+                submit_button.click(
+                    fn=add_new_eval,
+                    inputs=[model_name_textbox, task],
+                    outputs=submission_result,
+                )

+    # --- Docker Log Submission (Simplified) ---
+    with gr.Accordion("Submit log files from a Docker run:", open=False):
+        gr.Markdown("""
+        **⚠️ Warning: By uploading the zip file, you confirm that you have read and agree to the following terms:**
+
+        - **Public Data Sharing:** You consent to the public sharing of the energy performance data derived from your submission. No additional information related to this model, including proprietary configurations, will be disclosed.
+        - **Data Integrity:** You certify that the log files submitted are accurate, unaltered, and generated directly from testing your model as per the specified benchmarking procedures.
+        - **Model Representation:** You affirm that the model tested and submitted is representative of the production-level version, including its level of quantization and any other relevant characteristics impacting energy efficiency and performance.
+        """)
+        file_output = gr.File(visible=False)
+        u = gr.UploadButton("Upload a zip file with logs", file_count="single", interactive=True)
+        u.upload(add_docker_eval, u, file_output)

+    # --- Leaderboard and Recent Models Accordions ---
+    with gr.Row():
+        with gr.Column():
+            with gr.Accordion("Models that are in the latest leaderboard version:", open=False, visible=False):
+                gr.Dataframe(get_leaderboard_models(), elem_classes="full-width")
+            with gr.Accordion("Models that have been benchmarked recently:", open=False, visible=False):
+                gr.Dataframe(formatted_df, elem_classes="full-width")
+
+demo.launch()
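A note on the `highlight_cols` styling helper in the new file: pandas calls it through `Styler.apply(func, axis=None)`, which hands the whole DataFrame to `func` and expects back a frame of the same shape whose cells are CSS strings. A small self-contained sketch of that contract, using made-up rows purely for illustration (this sketch also defaults unmatched cells to an empty style, which the committed function does not do):

import pandas as pd

def highlight_cols(x):
    # Build a same-shaped DataFrame of CSS strings: whole rows are coloured
    # according to their status value.
    css = x.copy()
    css.loc[:, :] = ''  # default: no styling (an addition in this sketch)
    css[x['status'] == 'COMPLETED'] = 'color: green'
    css[x['status'] == 'PENDING'] = 'color: orange'
    css[x['status'] == 'FAILED'] = 'color: red'
    return css

# Hypothetical sample data, only to exercise the styler.
sample = pd.DataFrame({
    'model': ['org/model-a', 'org/model-b', 'org/model-c'],
    'status': ['COMPLETED', 'PENDING', 'FAILED'],
})
styled = sample.style.apply(highlight_cols, axis=None)
print(styled.to_html())  # the emitted HTML carries the per-cell colour rules

In the app itself the resulting Styler (`formatted_df`) is what gets passed to `gr.Dataframe` in the final accordion.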