Update meta_data.py
meta_data.py (+90 -87)

VLMEVALKIT_README = 'https://raw.githubusercontent.com/open-compass/VLMEvalKit/main/README.md'
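
# Usage sketch (an assumption, not something this file defines): the raw
# README can be fetched at runtime. ``requests`` and the helper name below
# are hypothetical.
import requests

def fetch_vlmevalkit_readme(url: str = VLMEVALKIT_README, timeout: float = 10.0) -> str:
    """Hypothetical helper: download the VLMEvalKit README text."""
    resp = requests.get(url, timeout=timeout)
    resp.raise_for_status()
    return resp.text
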
# CONSTANTS-CITATION
CITATION_BUTTON_TEXT = r"""@article{guo2025sok,
  title={{Frontier AI's Impact on the Cybersecurity Landscape}},
  author={Guo, Wenbo and Potter, Yujin and Shi, Tianneng and Wang, Zhun and Zhang, Andy and Song, Dawn},
  journal={arXiv preprint arXiv:2504.05408},
  year={2025}
}
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
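
# Hypothetical UI sketch: Spaces like this are commonly Gradio apps, so the
# citation constants would typically back a copyable textbox; gradio and the
# helper below are assumptions, not confirmed by this file. The function is
# meant to be called inside a gr.Blocks context.
import gradio as gr

def build_citation_section() -> None:
    """Hypothetical helper: expose the citation as a copyable textbox."""
    with gr.Accordion(CITATION_BUTTON_LABEL, open=False):
        gr.Textbox(value=CITATION_BUTTON_TEXT, label='Citation', lines=7,
                   show_copy_button=True)
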
# CONSTANTS-TEXT
LEADERBORAD_INTRODUCTION = """# Frontier AI Cybersecurity Observatory
### Welcome to the Frontier AI Cybersecurity Observatory! This leaderboard is a collection of benchmarks relevant to cybersecurity capabilities.

Tracking AI capabilities in cybersecurity is essential for understanding emerging impacts and risks. Our Frontier AI Cybersecurity Observatory provides a centralized platform that aggregates relevant benchmarks, enabling the community to more easily monitor and assess the evolving cybersecurity capabilities of AI systems.

This leaderboard covers {} benchmarks.

This leaderboard was last updated: {} """
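
# Usage sketch: the two ``{}`` placeholders above presumably take the
# benchmark count and the last-update date; the helper below is hypothetical.
def render_introduction(num_benchmarks: int, last_updated: str) -> str:
    """Hypothetical helper: fill the placeholders in the intro text."""
    return LEADERBORAD_INTRODUCTION.format(num_benchmarks, last_updated)
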
# CONSTANTS-FIELDS
# META_FIELDS = [
#     'Model'
# ]

DEFAULT_TASK = [
    'Vulnerable Code Generation', 'Attack Generation', 'CTF', 'Cyber Knowledge',
    'Pen Test', 'Vulnerability Detection', 'PoC Generation', 'Patching'
]
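
# Hypothetical UI sketch (gradio as imported above): DEFAULT_TASK can seed a
# task filter for the leaderboard table; the helper name is illustrative only.
def build_task_filter():
    """Hypothetical helper: checkbox filter over the default task list."""
    return gr.CheckboxGroup(choices=DEFAULT_TASK, value=DEFAULT_TASK, label='Task')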

# The README text shown for each benchmark
LEADERBOARD_MD = {}

LEADERBOARD_MD['CyberSecEval-3'] = """CyberSecEval-3 is a suite of security benchmarks for LLMs. It assesses 8 different risks across two broad categories: risk to third parties, and risk to application developers and end users.

Paper: https://arxiv.org/abs/2408.01605
Code: https://github.com/meta-llama/PurpleLlama/tree/main/CybersecurityBenchmarks
"""
LEADERBOARD_MD['SecCodePLT'] = """SecCodePLT is a unified and comprehensive evaluation platform for code GenAIs' risks. The benchmark consists of insecure coding tasks and cyberattack-helpfulness tasks; the helpfulness tasks are designed around five attack steps: reconnaissance, weaponization & infiltration, C2 & execution, discovery, and collection.

Paper: https://arxiv.org/abs/2410.11096
Code: https://github.com/CodeSecPLT/CodeSecPLT
"""
LEADERBOARD_MD['RedCode'] = """RedCode is a benchmark for risky code execution and generation: (1) RedCode-Exec provides challenging prompts that could lead to risky code execution, aiming to evaluate code agents' ability to recognize and handle unsafe code. (2) RedCode-Gen provides 160 prompts with function signatures and docstrings as input to assess whether code agents will follow instructions to generate harmful code or software.

Paper: https://arxiv.org/abs/2411.07781
Code: https://github.com/AI-secure/RedCode
"""
LEADERBOARD_MD['CyBench'] = """Cybench is a framework for specifying cybersecurity tasks and evaluating agents on those tasks. It includes 40 professional-level Capture the Flag (CTF) tasks from 4 distinct CTF competitions, chosen to be recent, meaningful, and spanning a wide range of difficulties.

Paper: https://arxiv.org/abs/2408.08926
Code: https://github.com/andyzorigin/cybench
"""
LEADERBOARD_MD['NYU CTF Bench'] = """NYU CTF Bench assesses LLMs on solving CTF challenges, drawing on a diverse range of challenges from popular competitions.

Paper: https://arxiv.org/abs/2406.05590
Code: https://github.com/NYU-LLM-CTF/NYU_CTF_Bench
"""
LEADERBOARD_MD['CyberBench'] = """CyberBench is a multi-task benchmark for evaluating LLM knowledge of cybersecurity.

Paper: https://zefang-liu.github.io/files/liu2024cyberbench_paper.pdf
Code: https://github.com/jpmorganchase/CyberBench
"""
LEADERBOARD_MD['CyberMetric'] = """CyberMetric is designed to accurately test the general knowledge of LLMs in cybersecurity. CyberMetric-80, CyberMetric-500, CyberMetric-2000, and CyberMetric-10000 are multiple-choice Q&A benchmark datasets comprising 80, 500, 2,000, and 10,000 questions, respectively.

Paper: https://arxiv.org/abs/2402.07688
Code: https://github.com/cybermetric/CyberMetric/tree/main
"""
LEADERBOARD_MD['TACTL'] = """Threat Actor Competency Test for LLMs (TACTL) is a challenging multiple-choice benchmark of offensive cyber knowledge.

Paper: https://arxiv.org/abs/2502.15797
Code: not yet released; the authors plan to open-source TACTL (https://gbhackers.com/mitre-releases-occult-framework/).
"""
LEADERBOARD_MD['AutoPenBench'] = """AutoPenBench is an open benchmark for evaluating generative agents in automated penetration testing.

Paper: https://arxiv.org/abs/2410.03225
Code: https://github.com/lucagioacchini/auto-pen-bench
"""
LEADERBOARD_MD['PrimeVul'] = """PrimeVul is a dataset for training and evaluating code LMs for vulnerability detection.

Paper: https://arxiv.org/abs/2403.18624
Code: https://github.com/DLVulDet/PrimeVul
"""
LEADERBOARD_MD['CRUXEval'] = """CRUXEval (Code Reasoning, Understanding, and eXecution Evaluation) is a benchmark consisting of 800 Python functions (3-13 lines), each paired with an input-output example and used for input- and output-prediction tasks.

Paper: https://arxiv.org/abs/2401.03065
Code: https://github.com/facebookresearch/cruxeval
"""
LEADERBOARD_MD['SWE-bench-verified'] = """SWE-bench Verified is a human-validated subset of SWE-bench that more reliably evaluates AI models' ability to solve real-world software issues.

Paper: https://openai.com/index/introducing-swe-bench-verified/
Code: https://github.com/swe-bench/SWE-bench
"""
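
# Hypothetical rendering sketch (gradio as imported above): show each
# benchmark's blurb in its own tab, inside a gr.Blocks context; the helper
# name is illustrative only.
def build_benchmark_tabs() -> None:
    """Hypothetical helper: render each benchmark description as Markdown."""
    with gr.Tabs():
        for name, md in LEADERBOARD_MD.items():
            with gr.Tab(name):
                gr.Markdown(md)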