"""Gradio app for the DF-Arena leaderboard."""

import json
import os

import gradio as gr
from huggingface_hub import snapshot_download

from ui.leaderboard import render_leader_board, render_info_html
from ui.df_arena_tool import render_tool_info
from ui.submission import render_submission_page
from utils import load_leaderboard

REPO_ID = os.getenv('REPO_ID')
DB_ERR_PATH = './data/data/leaderboard_err.csv'
DB_ACCURACY_PATH = './data/data/leaderboard_accuracy.csv'
CITATIONS_PATH = './data/data/model_citations.json'
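
# Fetch the leaderboard data files from the Hugging Face Hub dataset repo on first run.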
if not os.path.exists('./data/data'):
    snapshot_download(repo_id=REPO_ID,
                      repo_type="dataset", local_dir='./data/data')

# Citation / reference metadata for each model shown on the leaderboard.
with open(CITATIONS_PATH, 'r') as f:
    model_citations = json.load(f)

# Load the EER and accuracy leaderboard tables.
leaderboard_df_err = load_leaderboard(DB_ERR_PATH)
leaderboard_df_accuracy = load_leaderboard(DB_ACCURACY_PATH)
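
# CSS overrides applied on top of the Soft theme: larger text and more spacing.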
custom_css = """
h1 {
    font-size: 50px !important;    /* Increase heading sizes */
    line-height: 2.0 !important;   /* Increase line spacing */
    text-align: center !important; /* Center align headings */
}

.gradio-container {
    padding: 30px !important; /* Increase padding around the UI */
}

.markdown-body p {
    font-size: 30px !important;  /* Increase text size */
    line-height: 2.0 !important; /* More space between lines */
}

.gradio-container .gr-block {
    margin-bottom: 20px !important; /* Add more space between elements */
}
"""


def create_ui():
    """Assemble the DF-Arena leaderboard interface and return the Blocks app."""
    with gr.Blocks(theme=gr.themes.Soft(text_size=gr.themes.sizes.text_lg), css=custom_css) as demo:
        # Banner image (note: hard-coded absolute path).
        gr.Image('/data/code/DF_arena_leaderboard/leaderboard/data/df_arena.jpg')

        with gr.Tabs():
            with gr.Tab("Leaderboard"):
                with gr.Column():
                    render_info_html()
                    gr.Markdown("Equal Error Rate (EER %) for each system")
                    render_leader_board(leaderboard_df_err, model_citations)
                    gr.Markdown("Accuracy (%) for each system")
                    render_leader_board(leaderboard_df_accuracy, model_citations)

            with gr.Tab("Evaluation"):
                render_tool_info()

            with gr.Tab("Submission"):
                render_submission_page()

    return demo
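

# Build the UI and start the Gradio server.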
create_ui().launch()