import gradio as gr
import pandas as pd
import yaml
import json
import os
from lm_eval import tasks, evaluator
from datetime import datetime
from huggingface_hub import HfApi
import plotly.express as px
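

# Gradio app for a small evaluation-leaderboard Space: models are evaluated
# with lm-eval, results are stored in a local JSON file under
# benchmark_results/, and updates are pushed back to the Space repository
# through the Hugging Face Hub API.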
class LeaderboardSpace:
    def __init__(self, space_name="ozayezerceli/PoCLeaderboard"):
        self.space_name = space_name
        self.results_dir = "benchmark_results"
        self.leaderboard_file = os.path.join(self.results_dir, "leaderboard.json")
        os.makedirs(self.results_dir, exist_ok=True)
        self.load_leaderboard()
        self.api = HfApi()
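
    # The leaderboard file is a plain JSON document of the form:
    # {"models": ["<model name>", ...],
    #  "results": {"<model name>": {"timestamp": ..., "model_id": ..., "scores": {...}}}}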
    def load_leaderboard(self):
        if os.path.exists(self.leaderboard_file):
            with open(self.leaderboard_file, 'r') as f:
                self.leaderboard = json.load(f)
        else:
            self.leaderboard = {"models": [], "results": {}}

    def save_leaderboard(self):
        with open(self.leaderboard_file, 'w') as f:
            json.dump(self.leaderboard, f, indent=2)
        # Push the updated leaderboard back to the Space repository
        self.api.upload_file(
            path_or_fileobj=self.leaderboard_file,
            path_in_repo=self.leaderboard_file,
            repo_id=self.space_name,
            repo_type="space"
        )

    def get_leaderboard_df(self):
        if not self.leaderboard["models"]:
            return pd.DataFrame()

        data = []
        for model in self.leaderboard["models"]:
            result = self.leaderboard["results"][model]
            row = {"Model": model, "Timestamp": result["timestamp"]}
            for task, scores in result["scores"].items():
                for metric, value in scores.items():
                    # Skip non-numeric entries (e.g. lm-eval's "alias" fields)
                    # and report metrics as percentages
                    if isinstance(value, (int, float)):
                        row[f"{task}_{metric}"] = round(value * 100, 2)
            data.append(row)
        return pd.DataFrame(data)

    def create_leaderboard_plot(self):
        df = self.get_leaderboard_df()
        if df.empty:
            return None

        # Melt the DataFrame into long form for plotting
        metrics_cols = [col for col in df.columns if col not in ["Model", "Timestamp"]]
        df_melted = df.melt(
            id_vars=["Model"],
            value_vars=metrics_cols,
            var_name="Metric",
            value_name="Score"
        )

        # Create a grouped bar plot
        fig = px.bar(
            df_melted,
            x="Model",
            y="Score",
            color="Metric",
            title="Model Performance Across Tasks",
            barmode="group"
        )
        fig.update_layout(
            yaxis_title="Score (%)",
            xaxis_title="Model",
            legend_title="Metric"
        )
        return fig


def create_interface():
    space = LeaderboardSpace()

    with gr.Blocks() as demo:
        gr.Markdown("# 🏆 Model Evaluation Leaderboard")

        with gr.Tab("Leaderboard"):
            with gr.Row():
                leaderboard_plot = gr.Plot()
            with gr.Row():
                leaderboard_table = gr.DataFrame()

        with gr.Tab("Submit Evaluation"):
            with gr.Row():
                with gr.Column():
                    model_name = gr.Textbox(label="Model Name")
                    model_id = gr.Textbox(label="Hugging Face Model ID")

                    # Task selection; lm-eval >= 0.4 exposes the task registry
                    # through TaskManager (older releases used tasks.ALL_TASKS)
                    available_tasks = sorted(tasks.TaskManager().all_tasks)
                    task_selection = gr.CheckboxGroup(
                        choices=available_tasks,
                        label="Select Tasks"
                    )
                    submit_btn = gr.Button("Submit Evaluation")

            with gr.Row():
                evaluation_status = gr.Textbox(
                    label="Evaluation Status",
                    interactive=False
                )

        with gr.Tab("Custom Tasks"):
            with gr.Row():
                with gr.Column():
                    task_name = gr.Textbox(label="Task Name")
                    task_description = gr.Textbox(
                        label="Task Description",
                        lines=3
                    )
                    example_file = gr.File(
                        label="Upload Examples (JSON)",
                        file_types=[".json"]
                    )
                    submit_task_btn = gr.Button("Submit Custom Task")

            with gr.Row():
                task_status = gr.Textbox(
                    label="Task Status",
                    interactive=False
                )

        # Define update functions
        def update_leaderboard():
            df = space.get_leaderboard_df()
            plot = space.create_leaderboard_plot()
            return df, plot

        def submit_evaluation(model_name, model_id, selected_tasks):
            try:
                # Run the evaluation; lm-eval expects a model type plus
                # model_args rather than a bare model id (a Hugging Face
                # causal model is assumed here)
                results = evaluator.simple_evaluate(
                    model="hf",
                    model_args=f"pretrained={model_id}",
                    tasks=selected_tasks,
                    num_fewshot=0,
                    batch_size=1
                )

                # Update the leaderboard with the per-task metric dict
                if model_name not in space.leaderboard["models"]:
                    space.leaderboard["models"].append(model_name)
                space.leaderboard["results"][model_name] = {
                    "timestamp": datetime.now().isoformat(),
                    "model_id": model_id,
                    "scores": results.get("results", {})
                }
                space.save_leaderboard()
                return "Evaluation completed successfully!", *update_leaderboard()
            except Exception as e:
                return f"Error during evaluation: {str(e)}", None, None

        def submit_custom_task(task_name, description, file):
            try:
                # Load and validate the uploaded task data
                with open(file.name) as f:
                    task_data = json.load(f)

                # Save the task configuration locally
                task_config = {
                    "name": task_name,
                    "description": description,
                    "data": task_data
                }
                task_file = os.path.join(space.results_dir, f"task_{task_name}.json")
                with open(task_file, 'w') as f:
                    json.dump(task_config, f, indent=2)

                # Upload the task file to the Space repository
                space.api.upload_file(
                    path_or_fileobj=task_file,
                    path_in_repo=task_file,
                    repo_id=space.space_name,
                    repo_type="space"
                )
                return "Custom task added successfully!"
            except Exception as e:
                return f"Error adding custom task: {str(e)}"

        # Connect components
        submit_btn.click(
            submit_evaluation,
            inputs=[model_name, model_id, task_selection],
            outputs=[evaluation_status, leaderboard_table, leaderboard_plot]
        )
        submit_task_btn.click(
            submit_custom_task,
            inputs=[task_name, task_description, example_file],
            outputs=[task_status]
        )

        # Initial loading of the leaderboard
        demo.load(
            update_leaderboard,
            outputs=[leaderboard_table, leaderboard_plot]
        )

    return demo


# Launch the interface
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
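
# Typical Space requirements for this app (assumed; normally listed in a
# separate requirements.txt): gradio, pandas, plotly, lm-eval,
# huggingface_hub, pyyaml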