LEADERBOARD_HEADER = """
<style>
.header-gradient {
top: 40%;
bottom: 40%;
padding: 10px 0px;
font-weight: bold;
font-size: 40px;
font-family: Inter, Arial, Helvetica, sans-serif;
background: linear-gradient(to right, #FF705B, #FFB457);
-webkit-text-fill-color: transparent;
-webkit-background-clip: text;
}
.header-normal {
top: 40%;
bottom: 40%;
padding: 10px 0px;
font-weight: bold;
font-size: 40px;
font-family: Inter, Arial, Helvetica, sans-serif;
}
</style>
<div align="center">
<span class="header-gradient"> DD-Ranking </span>
<span class="header-normal"> Leaderboard </span>
</div>
<p align="center">
| <a href="https://nus-hpc-ai-lab.github.io/DD-Ranking/"><b>Documentation</b></a> | <a href="https://github.com/NUS-HPC-AI-Lab/DD-Ranking"><b>GitHub</b></a> | <a href=""><b>Paper</b> (Coming Soon)</a> | <a href=""><b>Twitter/X</b> (Coming Soon)</a> | <a href=""><b>Developer Slack</b> (Coming Soon)</a> |
</p>"""
LEADERBOARD_INTRODUCTION = """
# DD-Ranking Leaderboard
Welcome to the **DD-Ranking** leaderboard!
> DD-Ranking (DD: Dataset Distillation) is an integrated, easy-to-use benchmark for dataset distillation. It provides a fair evaluation scheme for DD methods that decouples the effects of knowledge distillation and data augmentation, so that scores reflect the real informativeness of the distilled data.
- **Fair Evaluation**: The evaluation scheme isolates the contribution of the distilled data itself from that of knowledge distillation and data augmentation.
- **Easy-to-use**: DD-Ranking provides a unified interface for dataset distillation evaluation.
- **Extensible**: DD-Ranking supports various datasets and models.
- **Customizable**: DD-Ranking supports various data augmentations and soft label strategies.
**Join the Leaderboard**: Please see the [instructions](https://github.com/NUS-HPC-AI-Lab/DD-Ranking/blob/main/CONTRIBUTING.md) to participate.
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
COMING SOON
"""
IPC_INFO = """
Images Per Class
"""
LABEL_TYPE_INFO = """
Hard labels are categorical, in the same format as the labels of the real dataset. Soft labels are generated by a teacher model pretrained on the target dataset.
"""
WEIGHT_ADJUSTMENT_INTRODUCTION = """
The ranking score (DD-Ranking Score, DDRS) in the following table is computed as $DDRS = \\frac{e^{w \\cdot \\text{IOR} - (1 - w) \\cdot \\text{HLR}} - e^{-1}}{e - e^{-1}}$, where $w$ is the weight of the IOR metric and $1 - w$ the weight of the HLR metric.
**You can specify the weight $w$ below.**
"""
METRIC_DEFINITION_INTRODUCTION = """
$\\text{Acc.}$: The test accuracy of a model trained on the data indicated by the subscript.
$\\text{full-hard}$: Full dataset with hard labels.
$\\text{syn-hard}$: Synthetic dataset with hard labels.
$\\text{syn-any}$: Synthetic dataset with personalized evaluation methods (hard or soft labels).
$\\text{rdm-any}$: Randomly selected dataset (under the same compression ratio) with the same personalized evaluation methods.
$\\text{HLR} = \\text{Acc.}_{\\text{full-hard}} - \\text{Acc.}_{\\text{syn-hard}}$: The degree to which the original dataset is recovered under hard labels (hard label recovery).
$\\text{IOR} = \\text{Acc.}_{\\text{syn-any}} - \\text{Acc.}_{\\text{rdm-any}}$: The improvement over random selection when using personalized evaluation methods (improvement over random).
"""
DATASET_LIST = ["CIFAR-10", "CIFAR-100", "Tiny-ImageNet"]
IPC_LIST = ["IPC-1", "IPC-10", "IPC-50"]
DATASET_IPC_LIST = {
"CIFAR-10": ["IPC-1", "IPC-10", "IPC-50"],
"CIFAR-100": ["IPC-1", "IPC-10", "IPC-50"],
"Tiny-ImageNet": ["IPC-10", "IPC-50"],
}
LABEL_TYPE_LIST = ["Hard Label", "Soft Label"]
METRICS = ["HLR", "IOR"]
METRICS_SIGN = [1.0, -1.0]
COLUMN_NAMES = ["Ranking", "Method", "Verified", "Date", "Label Type", "HLR%", "IOR%", "DDRS"]
DATA_TITLE_TYPE = ['number', 'markdown', 'markdown', 'markdown', 'markdown', 'number', 'number', 'number']
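
# Hedged usage sketch (assumption: this Space renders the table with Gradio's
# Dataframe component; this helper is NOT part of the original file).
# "markdown" columns render links (e.g., the Method cell), while "number"
# columns sort numerically.
import gradio as gr

def build_leaderboard_table(rows):
    return gr.Dataframe(
        value=rows,
        headers=COLUMN_NAMES,
        datatype=DATA_TITLE_TYPE,
        interactive=False,
    )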
DATASET_MAPPING = {
"CIFAR-10": 0,
"CIFAR-100": 1,
"Tiny-ImageNet": 2,
}
IPC_MAPPING = {
"IPC-1": 0,
"IPC-10": 1,
"IPC-50": 2,
}
LABEL_MAPPING = {
"Hard Label": 0,
"Soft Label": 1,
}
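
# Hypothetical example (not part of the original file) of how the mapping
# dicts above could translate UI selections into indices, assuming results
# are keyed by (dataset, IPC, label type):
def selection_to_indices(dataset: str, ipc: str, label_type: str) -> tuple:
    return (DATASET_MAPPING[dataset],
            IPC_MAPPING[ipc],
            LABEL_MAPPING[label_type])

# e.g. selection_to_indices("CIFAR-10", "IPC-10", "Soft Label") == (0, 1, 1)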