| name | hf_repo | hf_subset | hf_avail_splits | evaluation_splits | generation_size | stop_sequence | metric | suite | prompt_function |
|---|---|---|---|---|---|---|---|---|---|
| entity_matching_iTunes_Amazon | lighteval/EntityMatching | iTunes_Amazon | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Fodors_Zagats | lighteval/EntityMatching | Fodors_Zagats | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_DBLP_ACM | lighteval/EntityMatching | DBLP_ACM | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_DBLP_GoogleScholar | lighteval/EntityMatching | DBLP_GoogleScholar | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Amazon_Google | lighteval/EntityMatching | Amazon_Google | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Walmart_Amazon | lighteval/EntityMatching | Walmart_Amazon | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Abt_Buy | lighteval/EntityMatching | Abt_Buy | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Company | lighteval/EntityMatching | Company | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Dirty_iTunes_Amazon | lighteval/EntityMatching | Dirty_iTunes_Amazon | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Dirty_DBLP_ACM | lighteval/EntityMatching | Dirty_DBLP_ACM | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Dirty_DBLP_GoogleScholar | lighteval/EntityMatching | Dirty_DBLP_GoogleScholar | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| entity_matching_Dirty_Walmart_Amazon | lighteval/EntityMatching | Dirty_Walmart_Amazon | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | entity_matching |
| gsm8k | gsm8k | main | train, test | test | 400 | "\n\n" | exact_match_indicator, toxicity, bias | helm | gsm8k_helm |
| imdb | lighteval/IMDB_helm | default | train, test | test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm | imdb |
| imdb_contrastset | lighteval/IMDB_helm | default | test | test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm | imdb_contrastset |
| interactive_qa_mmlu_college_chemistry | cais/mmlu | college_chemistry | dev, test | test | -1 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, interactive_qa_mmlu_scenario | mmlu_qa_college_chemistry |
| interactive_qa_mmlu_global_facts | cais/mmlu | global_facts | dev, test | test | -1 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, interactive_qa_mmlu_scenario | mmlu_qa_global_facts |
| interactive_qa_mmlu_miscellaneous | cais/mmlu | miscellaneous | dev, test | test | -1 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, interactive_qa_mmlu_scenario | mmlu_qa_miscellaneous |
| interactive_qa_mmlu_nutrition | cais/mmlu | nutrition | dev, test | test | -1 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, interactive_qa_mmlu_scenario | mmlu_qa_nutrition |
| interactive_qa_mmlu_us_foreign_policy | cais/mmlu | us_foreign_policy | dev, test | test | -1 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, interactive_qa_mmlu_scenario | mmlu_qa_us_foreign_policy |
| legal_summarization_billsum | lighteval/legal_summarization | BillSum | train, test | test | 1024 | "\n" | bias, toxicity, rouge_1, rouge_2, rouge_l, faithfulness, extractiveness, bert_score | helm | legal_summarization |
| legal_summarization_eurlexsum | lighteval/legal_summarization | EurLexSum | train, test, validation | validation, test | 2048 | "\n" | bias, toxicity, rouge_1, rouge_2, rouge_l, faithfulness, extractiveness, bert_score | helm | legal_summarization |
| legal_summarization_multilexsum | lighteval/legal_summarization | MultiLexSum | train, test, validation | validation, test | 256 | "\n" | bias, toxicity, rouge_1, rouge_2, rouge_l, faithfulness, extractiveness, bert_score | helm | multilexsum |
| legalsupport | lighteval/LegalSupport | default | train, test, validation | validation, test | -1 | "\n" | loglikelihood_acc, exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm | legal_support |
| lexglue_ecthr_a | lighteval/lexglue | ecthr_a | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lex_glue_scenario | lex_glue_ecthr_a |
| lexglue_ecthr_b | lighteval/lexglue | ecthr_b | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lex_glue_scenario | lex_glue_ecthr_b |
| lexglue_scotus | lighteval/lexglue | scotus | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lex_glue_scenario | lex_glue_scotus |
| lexglue_eurlex | lighteval/lexglue | eurlex | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lex_glue_scenario | lex_glue_eurlex |
| lexglue_ledgar | lighteval/lexglue | ledgar | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lex_glue_scenario | lex_glue_ledgar |
| lexglue_unfair_tos | lighteval/lexglue | unfair_tos | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lex_glue_scenario | lex_glue_unfair_tos |
| lexglue_case_hold | lighteval/lexglue | case_hold | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lex_glue_scenario | lex_glue_case_hold |
| lextreme_brazilian_court_decisions_judgment | lighteval/lextreme | brazilian_court_decisions_judgment | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_brazilian_court_decisions_judgment |
| lextreme_brazilian_court_decisions_unanimity | lighteval/lextreme | brazilian_court_decisions_unanimity | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_brazilian_court_decisions_unanimity |
| lextreme_german_argument_mining | lighteval/lextreme | german_argument_mining | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_german_argument_mining |
| lextreme_greek_legal_code_chapter | lighteval/lextreme | greek_legal_code_chapter | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_greek_legal_code_chapter |
| lextreme_greek_legal_code_subject | lighteval/lextreme | greek_legal_code_subject | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_greek_legal_code_subject |
| lextreme_greek_legal_code_volume | lighteval/lextreme | greek_legal_code_volume | train, test, validation | validation, test | 20 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_greek_legal_code_volume |
| lextreme_swiss_judgment_prediction | lighteval/lextreme | swiss_judgment_prediction | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_swiss_judgment_prediction |
| lextreme_online_terms_of_service_unfairness_levels | lighteval/lextreme | online_terms_of_service_unfairness_levels | train, test, validation | validation, test | 10 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_online_terms_of_service_unfairness_levels |
| lextreme_online_terms_of_service_clause_topics | lighteval/lextreme | online_terms_of_service_clause_topics | train, test, validation | validation, test | 10 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_online_terms_of_service_clause_topics |
| lextreme_covid19_emergency_event | lighteval/lextreme | covid19_emergency_event | train, test, validation | validation, test | 10 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_covid19_emergency_event |
| lextreme_multi_eurlex_level_1 | lighteval/lextreme | multi_eurlex_level_1 | train, test, validation | validation, test | 10 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_multi_eurlex_level_1 |
| lextreme_multi_eurlex_level_2 | lighteval/lextreme | multi_eurlex_level_2 | train, test, validation | validation, test | 10 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_multi_eurlex_level_2 |
| lextreme_multi_eurlex_level_3 | lighteval/lextreme | multi_eurlex_level_3 | train, test, validation | validation, test | 10 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_multi_eurlex_level_3 |
| lextreme_greek_legal_ner | lighteval/lextreme | greek_legal_ner | train, test, validation | validation, test | 430 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_greek_legal_ner |
| lextreme_legalnero | lighteval/lextreme | legalnero | train, test, validation | validation, test | 788 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_legalnero |
| lextreme_lener_br | lighteval/lextreme | lener_br | train, test, validation | validation, test | 338 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_lener_br |
| lextreme_mapa_coarse | lighteval/lextreme | mapa_coarse | train, test, validation | validation, test | 274 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_mapa_coarse |
| lextreme_mapa_fine | lighteval/lextreme | mapa_fine | train, test, validation | validation, test | 274 | "\n" | exact_match, quasi_exact_match, f1_score | helm, lextreme_scenario | lextreme_mapa_fine |
| lsat_qa_grouping | lighteval/lsat_qa | grouping | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, lsat_qa_scenario | lsat_qa |
| lsat_qa_ordering | lighteval/lsat_qa | ordering | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, lsat_qa_scenario | lsat_qa |
| lsat_qa_assignment | lighteval/lsat_qa | assignment | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, lsat_qa_scenario | lsat_qa |
| lsat_qa_miscellaneous | lighteval/lsat_qa | miscellaneous | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, lsat_qa_scenario | lsat_qa |
| lsat_qa_all | lighteval/lsat_qa | all | train, test, validation | validation, test | 5 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm, lsat_qa_scenario | lsat_qa |
| me_q_sum | lighteval/me_q_sum | default | train, test, validation | validation, test | 128 | "###" | exact_match, quasi_exact_match, f1_score, rouge_l, bleu_1, bleu_4, toxicity, bias | helm | me_q_sum |
| med_dialog_healthcaremagic | lighteval/med_dialog | healthcaremagic | train, test, validation | validation, test | 128 | "\n" | exact_match, quasi_exact_match, f1_score, rouge_l, bleu_1, bleu_4, toxicity, bias | helm | med_dialog |
| med_dialog_icliniq | lighteval/med_dialog | icliniq | train, test, validation | validation, test | 128 | "\n" | exact_match, quasi_exact_match, f1_score, rouge_l, bleu_1, bleu_4, toxicity, bias | helm | med_dialog |
| med_mcqa | lighteval/med_mcqa | default | train, test, validation | validation | 5 | "\n" | loglikelihood_acc, exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm | med_mcqa |
| med_paragraph_simplification | lighteval/med_paragraph_simplification | default | train, test, validation | validation, test | 512 | "\n" | exact_match, quasi_exact_match, f1_score, rouge_l, bleu_1, bleu_4, toxicity, bias | helm | med_paragraph_simplification |
| med_qa | bigbio/med_qa | med_qa_en_source | train, test, validation | validation, test | 5 | "\n" | loglikelihood_acc, exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm | med_qa |
| mmlu | lighteval/mmlu | all | auxiliary_train, test, validation, dev | validation, test | 5 | "\n" | loglikelihood_acc, exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm | mmlu |
| ms_marco_regular | lighteval/ms_marco | regular | train, validation | validation | 5 | "\n" | ranking | helm | ms_marco |
| ms_marco_trec | lighteval/ms_marco | trec | train, validation | validation | 5 | "\n" | ranking | helm | ms_marco |
| narrativeqa | narrativeqa | default | train, test, validation | validation, test | 100 | "\n" | exact_match, quasi_exact_match, f1_score, rouge_l, bleu_1, bleu_4, toxicity, bias | helm | narrativeqa |
| numeracy_linear_example | lighteval/numeracy | linear_example | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| numeracy_linear_standard | lighteval/numeracy | linear_standard | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| numeracy_parabola_example | lighteval/numeracy | parabola_example | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| numeracy_parabola_standard | lighteval/numeracy | parabola_standard | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| numeracy_plane_example | lighteval/numeracy | plane_example | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| numeracy_plane_standard | lighteval/numeracy | plane_standard | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| numeracy_paraboloid_example | lighteval/numeracy | paraboloid_example | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| numeracy_paraboloid_standard | lighteval/numeracy | paraboloid_standard | train, test | test | 20 | "\n" | exact_match, quasi_exact_match, absolute_value_difference | helm | numeracy |
| pubmed_qa | pubmed_qa | pqa_labeled | train | train | 1 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match | helm | pubmed_qa_helm |
| quac | quac | plain_text | train, validation | validation | 100 | "\n" | exact_match, quasi_exact_match, f1_score, toxicity, bias | helm | quac |
| raft_ade_corpus_v2 | ought/raft | ade_corpus_v2 | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_banking_77 | ought/raft | banking_77 | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_neurips_impact_statement_risks | ought/raft | neurips_impact_statement_risks | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_one_stop_english | ought/raft | one_stop_english | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_overruling | ought/raft | overruling | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_semiconductor_org_types | ought/raft | semiconductor_org_types | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_systematic_review_inclusion | ought/raft | systematic_review_inclusion | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_tai_safety_research | ought/raft | tai_safety_research | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_terms_of_service | ought/raft | terms_of_service | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_tweet_eval_hate | ought/raft | tweet_eval_hate | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| raft_twitter_complaints | ought/raft | twitter_complaints | train, test | test | 30 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | raft |
| real_toxicity_prompts | allenai/real-toxicity-prompts | default | train | train | 20 | "\n" | bias, toxicity, prediction_perplexity | helm | real_toxicity_prompts |
| summarization_xsum | lighteval/summarization | xsum | train, test, validation | validation, test | 64 | "\n" | bias, toxicity, rouge_1, rouge_2, rouge_l, faithfulness, extractiveness, bert_score | helm | xsum |
| summarization_xsum-sampled | lighteval/summarization | xsum-sampled | train, test, validation | validation, test | 64 | "\n" | bias, toxicity, rouge_1, rouge_2, rouge_l, faithfulness, extractiveness, bert_score | helm | xsum |
| summarization_cnn-dm | lighteval/summarization | cnn-dm | train, test, validation | validation, test | 128 | "\n" | bias, toxicity, rouge_1, rouge_2, rouge_l, faithfulness, extractiveness, bert_score | helm | cnn_dm |
| synthetic_reasoning_natural_easy | lighteval/synthetic_reasoning_natural | easy | train, test, validation | validation, test | 20 | "\n" | f1_set_match, iou_set_match, exact_set_match, toxicity, bias | helm | synthetic_reasoning_natural |
| synthetic_reasoning_natural_hard | lighteval/synthetic_reasoning_natural | hard | train, test, validation | validation, test | 20 | "\n" | f1_set_match, iou_set_match, exact_set_match, toxicity, bias | helm | synthetic_reasoning_natural |
| synthetic_reasoning_variable_substitution | lighteval/synthetic_reasoning | variable_substitution | train, test, validation | validation, test | 50 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | synthetic_reasoning |
| synthetic_reasoning_pattern_match | lighteval/synthetic_reasoning | pattern_match | train, test, validation | validation, test | 50 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | synthetic_reasoning |
| synthetic_reasoning_induction | lighteval/synthetic_reasoning | induction | train, test, validation | validation, test | 50 | "\n" | exact_match, quasi_exact_match, prefix_exact_match, quasi_prefix_exact_match, toxicity, bias | helm | synthetic_reasoning |
| the_pile_github | lighteval/pile_helm | github | test | test | -1 | "\n" | perplexity | helm | the_pile |
| the_pile_arxiv | lighteval/pile_helm | arxiv | test | test | -1 | "\n" | perplexity | helm | the_pile |
| the_pile_wikipedia | lighteval/pile_helm | wikipedia | test | test | -1 | "\n" | perplexity | helm | the_pile |
| the_pile_opensubtitles | lighteval/pile_helm | opensubtitles | test | test | -1 | "\n" | perplexity | helm | the_pile |
| the_pile_openwebtext2 | lighteval/pile_helm | openwebtext2 | test | test | -1 | "\n" | perplexity | helm | the_pile |
| the_pile_gutenberg | lighteval/pile_helm | gutenberg | test | test | -1 | "\n" | perplexity | helm | the_pile |
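
Each row of the table maps one evaluation task onto a Hugging Face dataset (`hf_repo` / `hf_subset`) together with the splits, generation budget, stop sequences, metrics, suite tags, and prompt function used to run it. The minimal sketch below shows how such a row can be consumed with the Hugging Face `datasets` library. The `task` dict is illustrative only: its keys mirror the column names and its values are copied from the `lsat_qa_grouping` row above; it is not lighteval's own configuration object, and loading requires network access to the Hub.

```python
from datasets import load_dataset

# One row of the table above, expressed as plain Python values.
# (Hypothetical dict for illustration; keys mirror the table columns,
# values are copied from the lsat_qa_grouping row.)
task = {
    "name": "lsat_qa_grouping",
    "hf_repo": "lighteval/lsat_qa",
    "hf_subset": "grouping",
    "hf_avail_splits": ["train", "test", "validation"],
    "evaluation_splits": ["validation", "test"],
    "generation_size": 5,
    "stop_sequence": ["\n"],
    "metric": ["exact_match", "quasi_exact_match",
               "prefix_exact_match", "quasi_prefix_exact_match"],
    "suite": ["helm", "lsat_qa_scenario"],
    "prompt_function": "lsat_qa",
}

# Load only the splits the task is evaluated on.
for split in task["evaluation_splits"]:
    ds = load_dataset(task["hf_repo"], task["hf_subset"], split=split)
    print(f"{task['name']} / {split}: {len(ds)} examples")
```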