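"""Streamlit app for submitting model evaluation jobs on datasets from the Hugging Face Hub.

Note: `DatasetFilter` and `ModelFilter` come from an older `huggingface_hub`
release; recent versions replaced them with keyword arguments passed directly
to `list_datasets` / `list_models`.
"""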
import streamlit as st
from huggingface_hub import DatasetFilter, HfApi, ModelFilter

api = HfApi()
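

# Fetch the dataset's card metadata and return its `train-eval-index` entry,
# which lists the evaluation task, config, and split names for the dataset.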
def get_metadata(dataset_name):
    filt = DatasetFilter(dataset_name=dataset_name)
    data = api.list_datasets(filter=filt, full=True)
    return data[0].cardData["train-eval-index"]
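

# Query the Hub for models whose card declares they were trained on
# `dataset_name` for the given `task`.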
def get_compatible_models(task, dataset_name):
    filt = ModelFilter(task=task, trained_dataset=dataset_name)
    compatible_models = api.list_models(filter=filt)
    return [model.modelId for model in compatible_models]
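

# Build the submission form: pick a dataset, config, split, and models,
# then submit the evaluation job.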
with st.form(key="form"):
    dataset_name = st.selectbox("Select a dataset to evaluate on", ["lewtun/autoevaluate_emotion"])
    metadata = get_metadata(dataset_name)
    # st.write(metadata)
    dataset_config = st.selectbox("Select the subset to evaluate on", [metadata[0]["config"]])
    splits = metadata[0]["splits"]
    # st.write(splits)
    evaluation_split = st.selectbox("Select the split to evaluate on", [v for d in splits for v in d.values()])
    # Strip the namespace and `autoevaluate_` prefix so the model filter sees the
    # canonical dataset name, e.g. "lewtun/autoevaluate_emotion" -> "emotion".
    compatible_models = get_compatible_models(metadata[0]["task"], dataset_name.split("/")[-1].split("_")[-1])
    options = st.multiselect("Select the models you wish to evaluate", compatible_models, compatible_models[0])
    submit_button = st.form_submit_button("Make Submission")
    if submit_button:
        # Placeholder response: no job is actually created yet.
        st.success("✅ Evaluation was successfully submitted with job ID 42")
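
# To try this locally (assuming the older huggingface_hub API is installed
# and this file is saved as app.py, the usual Spaces entry point):
#   streamlit run app.py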