Commit 681ab87 · Parent(s): abd3ce9
add tabs and links
app.py CHANGED
@@ -33,20 +33,20 @@ def restart_space():
     API.restart_space(repo_id=REPO_ID)
 
 ### Space initialisation
-try:
-    print(EVAL_REQUESTS_PATH)
-    snapshot_download(
-        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
+# try:
+#     print(EVAL_REQUESTS_PATH)
+#     snapshot_download(
+#         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+#     )
+# except Exception:
+#     restart_space()
+# try:
+#     print(EVAL_RESULTS_PATH)
+#     snapshot_download(
+#         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+#     )
+# except Exception:
+#     restart_space()
 
 
 LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
@@ -96,7 +96,10 @@ with demo:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("π Detector Leaderboard", elem_id="detector-benchmark-tab-table", id=0):
-            leaderboard = init_leaderboard(LEADERBOARD_DF)
+            with gr.TabItem("Generated Image Detection", elem_id="detector-benchmark-tab-table-1", id=3):
+                leaderboard = init_leaderboard(LEADERBOARD_DF)
+            with gr.TabItem("In-n-out-painting Detection", elem_id="detector-benchmark-tab-table-2", id=4):
+                leaderboard = init_leaderboard(LEADERBOARD_DF)
 
        with gr.TabItem("π Detector Playground ", elem_id="detector-playground-tab-table", id=1):
            with gr.Row():
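The second hunk nests two new TabItems inside the leaderboard tab. A minimal standalone sketch of that pattern, assuming only that gradio and pandas are installed; the DataFrame is stand-in data for the Space's LEADERBOARD_DF, and init_leaderboard is replaced by a plain gr.Dataframe:

# Sketch only, not the Space's app: nested TabItems, one table per child tab.
import gradio as gr
import pandas as pd

df = pd.DataFrame({"Model": ["DE-FAKE", "DIRE"], "Average": [0.72, 0.78]})  # stand-in rows

with gr.Blocks() as demo:
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("Detector Leaderboard", id=0):
            with gr.TabItem("Generated Image Detection", id=3):
                gr.Dataframe(df)
            with gr.TabItem("In-n-out-painting Detection", id=4):
                gr.Dataframe(df)

if __name__ == "__main__":
    demo.launch()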
eval-results/demo-leaderboard/DE-FAKE/results.json CHANGED
@@ -1,10 +1,11 @@
 {
     "config": {
-        "model_name": "DE-FAKE"
+        "model_name": "DE-FAKE",
+        "model_link": "https://github.com/zeyangsha/De-Fake/tree/main"
     },
     "results": {
         "miragenews": {
-            "acc": 0
+            "acc": 0.55
         },
         "genimage": {
             "acc": 0.90
eval-results/demo-leaderboard/DIRE/results.json CHANGED
@@ -1,13 +1,14 @@
 {
     "config": {
-        "model_name": "DIRE"
+        "model_name": "DIRE",
+        "model_link": "https://github.com/ZhendongWang6/DIRE"
     },
     "results": {
         "miragenews": {
-            "acc": 0
+            "acc": 0.87
         },
         "genimage": {
-            "acc": 0.
+            "acc": 0.68
         }
     }
 }
eval-results/demo-leaderboard/Universal/results.json CHANGED
@@ -1,13 +1,14 @@
 {
     "config": {
-        "model_name": "
+        "model_name": "UniversalFakeDetect",
+        "model_link": "https://utkarshojha.github.io/universal-fake-detection/"
     },
     "results": {
         "miragenews": {
-            "acc": 0
+            "acc": 0.42
         },
         "genimage": {
-            "acc": 0.
+            "acc": 0.73
         }
     }
 }
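All three results files share one shape, and the commit adds the same two things to each: a model_link in config and filled-in accuracies in results. A minimal sketch of reading one back, mirroring the config.get(...) fallback that read_evals.py uses below (path taken from this repo):

import json

with open("eval-results/demo-leaderboard/DIRE/results.json") as f:
    data = json.load(f)

name = data["config"]["model_name"]            # "DIRE"
link = data["config"].get("model_link", None)  # None for older files without the field
accs = {task: r["acc"] for task, r in data["results"].items()}
print(name, link, accs)  # DIRE https://github.com/ZhendongWang6/DIRE {'miragenews': 0.87, 'genimage': 0.68}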
src/display/formatting.py CHANGED
@@ -1,5 +1,5 @@
 def model_hyperlink(link, model_name):
-    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>' if link else model_name
 
 
 def make_clickable_model(model_name):
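The ternary makes the link optional: rows whose results.json carries no model_link now render as plain text instead of producing a dead <a href="None"> anchor. A quick illustration, with values borrowed from the results files above:

print(model_hyperlink("https://github.com/ZhendongWang6/DIRE", "DIRE"))
# -> '<a target="_blank" href="https://github.com/ZhendongWang6/DIRE" ...>DIRE</a>'
print(model_hyperlink(None, "DE-FAKE"))
# -> 'DE-FAKE'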
src/leaderboard/read_evals.py CHANGED
@@ -7,7 +7,7 @@ from dataclasses import dataclass
 import dateutil
 import numpy as np
 
-from src.display.formatting import make_clickable_model
+from src.display.formatting import model_hyperlink
 from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
 from src.submission.check_validity import is_model_on_hub
 
@@ -20,6 +20,7 @@ class EvalResult:
     full_model: str # org/model (path on hub)
     org: str
     model: str
+    model_link: str
     # revision: str # commit hash, "" if main
     results: dict
     # precision: Precision = Precision.Unknown
@@ -57,14 +58,15 @@ class EvalResult:
         result_key = f"{org}_{model}"
         full_model = "/".join(org_and_model)
 
-        still_on_hub, _, model_config = is_model_on_hub(
-            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
-        )
-        architecture = "?"
-        if model_config is not None:
-            architectures = getattr(model_config, "architectures", None)
-            if architectures:
-                architecture = ";".join(architectures)
+        model_link = config.get("model_link", None)
+        # still_on_hub, _, model_config = is_model_on_hub(
+        #     full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
+        # )
+        # architecture = "?"
+        # if model_config is not None:
+        #     architectures = getattr(model_config, "architectures", None)
+        #     if architectures:
+        #         architecture = ";".join(architectures)
 
         # Extract results available in this file (some results are split in several files)
         results = {}
@@ -82,6 +84,7 @@ class EvalResult:
         return self(
             eval_name=result_key,
             full_model=full_model,
+            model_link=model_link,
             org=org,
             model=model,
             results=results,
@@ -91,21 +94,21 @@ class EvalResult:
             # architecture=architecture
         )
 
-    def update_with_request_file(self, requests_path):
-        """Finds the relevant request file for the current model and updates info with it"""
-        request_file = get_request_file_for_model(requests_path, self.full_model)
-
-        try:
-            with open(request_file, "r") as f:
-                request = json.load(f)
-            self.model_type = ModelType.from_str(request.get("model_type", ""))
-            self.weight_type = WeightType[request.get("weight_type", "Original")]
-            self.license = request.get("license", "?")
-            self.likes = request.get("likes", 0)
-            self.num_params = request.get("params", 0)
-            self.date = request.get("submitted_time", "")
-        except Exception:
-            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
+    # def update_with_request_file(self, requests_path):
+    #     """Finds the relevant request file for the current model and updates info with it"""
+    #     request_file = get_request_file_for_model(requests_path, self.full_model)
+
+    #     try:
+    #         with open(request_file, "r") as f:
+    #             request = json.load(f)
+    #         self.model_type = ModelType.from_str(request.get("model_type", ""))
+    #         self.weight_type = WeightType[request.get("weight_type", "Original")]
+    #         self.license = request.get("license", "?")
+    #         self.likes = request.get("likes", 0)
+    #         self.num_params = request.get("params", 0)
+    #         self.date = request.get("submitted_time", "")
+    #     except Exception:
+    #         print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
 
     def to_dict(self):
         """Converts the Eval Result to a dict compatible with our dataframe display"""
@@ -117,7 +120,7 @@ class EvalResult:
             # AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
             # AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             # AutoEvalColumn.architecture.name: self.architecture,
-            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+            AutoEvalColumn.model.name: model_hyperlink(self.model_link, self.full_model),
             # AutoEvalColumn.revision.name: self.revision,
             AutoEvalColumn.average.name: average,
             # AutoEvalColumn.license.name: self.license,
@@ -130,26 +133,26 @@ class EvalResult:
         return data_dict
 
 
-def get_request_file_for_model(requests_path, model_name):
-    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
-    request_files = os.path.join(
-        requests_path,
-        f"{model_name}_eval_request_*.json",
-    )
-    request_files = glob.glob(request_files)
-
-    # Select correct request file (precision)
-    request_file = ""
-    request_files = sorted(request_files, reverse=True)
-    for tmp_request_file in request_files:
-        with open(tmp_request_file, "r") as f:
-            req_content = json.load(f)
-        if (
-            req_content["status"] in ["FINISHED"]
-            # and req_content["precision"] == precision.split(".")[-1]
-        ):
-            request_file = tmp_request_file
-    return request_file
+# def get_request_file_for_model(requests_path, model_name):
+#     """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
+#     request_files = os.path.join(
+#         requests_path,
+#         f"{model_name}_eval_request_*.json",
+#     )
+#     request_files = glob.glob(request_files)
+
+#     # Select correct request file (precision)
+#     request_file = ""
+#     request_files = sorted(request_files, reverse=True)
+#     for tmp_request_file in request_files:
+#         with open(tmp_request_file, "r") as f:
+#             req_content = json.load(f)
+#         if (
+#             req_content["status"] in ["FINISHED"]
+#             # and req_content["precision"] == precision.split(".")[-1]
+#         ):
+#             request_file = tmp_request_file
+#     return request_file
 
 
 def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
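Taken together, the read_evals.py changes thread the new field from results.json through EvalResult into the rendered model column. A condensed, self-contained sketch of that flow; EvalResultSketch is a hypothetical stand-in that keeps only the fields this diff touches, and model_hyperlink is inlined with its styling trimmed:

from dataclasses import dataclass

def model_hyperlink(link, model_name):
    # Same fallback as src/display/formatting.py after this commit (styling trimmed).
    return f'<a target="_blank" href="{link}">{model_name}</a>' if link else model_name

@dataclass
class EvalResultSketch:
    full_model: str
    model_link: str  # populated from config.get("model_link", None)
    results: dict

    def model_cell(self) -> str:
        # Same call the diff adds in to_dict(): anchor when a link exists, plain name otherwise.
        return model_hyperlink(self.model_link, self.full_model)

row = EvalResultSketch("demo-leaderboard/DIRE",
                       "https://github.com/ZhendongWang6/DIRE",
                       {"miragenews": 0.87, "genimage": 0.68})
print(row.model_cell())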