andylizf committed on
Commit 896e315 · verified · 1 parent: ae42bb0

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. latency_breakdown_diskann_20250218_074811.json +0 -0
  2. latency_breakdown_diskann_20250218_080100.json +0 -0
  3. rag-evaluation-harness/.ruff_cache/0.9.3/10823800545715580695 +0 -0
  4. rag-evaluation-harness/.ruff_cache/0.9.3/12210255023071856587 +0 -0
  5. rag-evaluation-harness/.ruff_cache/0.9.3/13094765894134414933 +0 -0
  6. rag-evaluation-harness/.ruff_cache/0.9.3/15155754320116275005 +0 -0
  7. rag-evaluation-harness/.ruff_cache/0.9.3/1770165252534271032 +0 -0
  8. rag-evaluation-harness/.ruff_cache/0.9.3/17718822729537072132 +0 -0
  9. rag-evaluation-harness/.ruff_cache/0.9.3/2644020233717826995 +0 -0
  10. rag-evaluation-harness/.ruff_cache/0.9.3/4342823198429526296 +0 -0
  11. rag-evaluation-harness/.ruff_cache/0.9.3/6569859743196745993 +0 -0
  12. rag-evaluation-harness/.ruff_cache/0.9.3/7617589821426377784 +0 -0
  13. rag-evaluation-harness/lm_eval/tasks/README.md +112 -0
  14. rag-evaluation-harness/lm_eval/tasks/ammlu/_generate_configs.py +120 -0
  15. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_electrical_engineering.yaml +4 -0
  16. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml +4 -0
  17. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml +4 -0
  18. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml +4 -0
  19. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_us_history.yaml +4 -0
  20. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml +4 -0
  21. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_machine_learning.yaml +4 -0
  22. rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_nutrition.yaml +4 -0
  23. rag-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml +29 -0
  24. rag-evaluation-harness/lm_eval/tasks/french_bench/french_bench_topic_based_nli.yaml +23 -0
  25. rag-evaluation-harness/lm_eval/tasks/french_bench/french_bench_vocab.yaml +20 -0
  26. rag-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml +4 -0
  27. rag-evaluation-harness/lm_eval/tasks/kobest/kobest_sentineg.yaml +25 -0
  28. rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_hi.yaml +7 -0
  29. rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_hy.yaml +7 -0
  30. rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_sk.yaml +7 -0
  31. rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_te.yaml +7 -0
  32. rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_fr.yaml +6 -0
  33. rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml +6 -0
  34. rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml +6 -0
  35. rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml +6 -0
  36. rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml +6 -0
  37. rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml +6 -0
  38. rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_te.yaml +6 -0
  39. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/_default_yaml +17 -0
  40. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_bn.yaml +4 -0
  41. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_de.yaml +4 -0
  42. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_eu.yaml +4 -0
  43. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ml.yaml +4 -0
  44. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ro.yaml +4 -0
  45. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sk.yaml +4 -0
  46. rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_uk.yaml +4 -0
  47. rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/README.md +47 -0
  48. rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc1.yaml +7 -0
  49. rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc2.yaml +7 -0
  50. rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc1.yaml +7 -0
latency_breakdown_diskann_20250218_074811.json ADDED
The diff for this file is too large to render. See raw diff
 
latency_breakdown_diskann_20250218_080100.json ADDED
The diff for this file is too large to render. See raw diff
 
rag-evaluation-harness/.ruff_cache/0.9.3/10823800545715580695 ADDED
Binary file (154 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/12210255023071856587 ADDED
Binary file (150 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/13094765894134414933 ADDED
Binary file (191 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/15155754320116275005 ADDED
Binary file (141 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/1770165252534271032 ADDED
Binary file (148 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/17718822729537072132 ADDED
Binary file (271 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/2644020233717826995 ADDED
Binary file (181 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/4342823198429526296 ADDED
Binary file (187 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/6569859743196745993 ADDED
Binary file (133 Bytes).
 
rag-evaluation-harness/.ruff_cache/0.9.3/7617589821426377784 ADDED
Binary file (182 Bytes).
 
rag-evaluation-harness/lm_eval/tasks/README.md ADDED
@@ -0,0 +1,112 @@
+
+ # Tasks
+
+ A list of supported tasks and task groupings can be viewed with `lm-eval --tasks list`.
+
+ For more information, including a full list of task names and their precise meanings or sources, follow the links provided to the individual README.md files for each subfolder.
+
+ | Task Family | Description | Language(s) |
+ |-------------|-------------|-------------|
+ | [aclue](aclue/README.md) | Tasks focusing on ancient Chinese language understanding and cultural aspects. | Ancient Chinese |
+ | [aexams](aexams/README.md) | Tasks in Arabic related to various academic exams covering a range of subjects. | Arabic |
+ | [agieval](agieval/README.md) | Tasks involving historical data or questions related to history and historical texts. | English, Chinese |
+ | [ammlu](ammlu/README.md) | Arabic version of MMLU. | Arabic |
+ | [anli](anli/README.md) | Adversarial natural language inference tasks designed to test model robustness. | English |
+ | [arc](arc/README.md) | Tasks involving complex reasoning over a diverse set of questions. | English |
+ | [arithmetic](arithmetic/README.md) | Tasks involving numerical computations and arithmetic reasoning. | English |
+ | [asdiv](asdiv/README.md) | Tasks involving arithmetic and mathematical reasoning challenges. | English |
+ | [babi](babi/README.md) | Tasks designed as question and answering challenges based on simulated stories. | English |
+ | [basqueglue](basqueglue/README.md) | Tasks designed to evaluate language understanding in the Basque language. | Basque |
+ | [bbh](bbh/README.md) | Tasks focused on deep semantic understanding through hypothesization and reasoning. | English, German |
+ | [belebele](belebele/README.md) | Language understanding tasks in a variety of languages and scripts. | Multiple (122 languages) |
+ | benchmarks | General benchmarking tasks that test a wide range of language understanding capabilities. | |
+ | [bigbench](bigbench/README.md) | Broad tasks from the BIG-bench benchmark designed to push the boundaries of large models. | Multiple |
+ | [blimp](blimp/README.md) | Tasks testing grammatical phenomena to evaluate a language model's linguistic capabilities. | English |
+ | [ceval](ceval/README.md) | Tasks that evaluate language understanding and reasoning in an educational context. | Chinese |
+ | [cmmlu](cmmlu/README.md) | Multi-subject multiple choice question tasks for comprehensive academic assessment. | Chinese |
+ | code_x_glue | Tasks that involve understanding and generating code across multiple programming languages. | Go, Java, JS, PHP, Python, Ruby |
+ | [copal_id](copal_id/README.md) | Indonesian causal commonsense reasoning dataset that captures local nuances. | Indonesian |
+ | [coqa](coqa/README.md) | Conversational question answering tasks to test dialog understanding. | English |
+ | [crows_pairs](crows_pairs/README.md) | Tasks designed to test model biases in various sociodemographic groups. | English, French |
+ | csatqa | Tasks related to SAT and other standardized testing questions for academic assessment. | Korean |
+ | [drop](drop/README.md) | Tasks requiring numerical reasoning, reading comprehension, and question answering. | English |
+ | [eq_bench](eq_bench/README.md) | Tasks focused on equality and ethics in question answering and decision-making. | English |
+ | [eus_exams](eus_exams/README.md) | Tasks based on various professional and academic exams in the Basque language. | Basque |
+ | [eus_proficiency](eus_proficiency/README.md) | Tasks designed to test proficiency in the Basque language across various topics. | Basque |
+ | [eus_reading](eus_reading/README.md) | Reading comprehension tasks specifically designed for the Basque language. | Basque |
+ | [eus_trivia](eus_trivia/README.md) | Trivia and knowledge testing tasks in the Basque language. | Basque |
+ | [fda](fda/README.md) | Tasks for extracting key-value pairs from FDA documents to test information extraction. | English |
+ | [fld](fld/README.md) | Tasks involving free-form and directed dialogue understanding. | English |
+ | [french_bench](french_bench/README.md) | Set of tasks designed to assess language model performance in French. | French |
+ | [glue](glue/README.md) | General Language Understanding Evaluation benchmark to test broad language abilities. | English |
+ | [gpqa](gpqa/README.md) | Tasks designed for general public question answering and knowledge verification. | English |
+ | [gsm8k](gsm8k/README.md) | A benchmark of grade school math problems aimed at evaluating reasoning capabilities. | English |
+ | [haerae](haerae/README.md) | Tasks focused on assessing detailed factual and historical knowledge. | Korean |
+ | [headqa](headqa/README.md) | A high-level education-based question answering dataset to test specialized knowledge. | Spanish, English |
+ | [hellaswag](hellaswag/README.md) | Tasks to predict the ending of stories or scenarios, testing comprehension and creativity. | English |
+ | [hendrycks_ethics](hendrycks_ethics/README.md) | Tasks designed to evaluate the ethical reasoning capabilities of models. | English |
+ | [hendrycks_math](hendrycks_math/README.md) | Mathematical problem-solving tasks to test numerical reasoning and problem-solving. | English |
+ | [ifeval](ifeval/README.md) | Interactive fiction evaluation tasks for narrative understanding and reasoning. | English |
+ | [kmmlu](kmmlu/README.md) | Knowledge-based multi-subject multiple choice questions for academic evaluation. | Korean |
+ | [kobest](kobest/README.md) | A collection of tasks designed to evaluate understanding in the Korean language. | Korean |
+ | [kormedmcqa](kormedmcqa/README.md) | Medical question answering tasks in Korean to test specialized domain knowledge. | Korean |
+ | [lambada](lambada/README.md) | Tasks designed to predict the endings of text passages, testing language prediction skills. | English |
+ | [lambada_cloze](lambada_cloze/README.md) | Cloze-style LAMBADA dataset. | English |
+ | [lambada_multilingual](lambada_multilingual/README.md) | Multilingual LAMBADA dataset. This is a legacy version of the multilingual dataset, and users should instead use `lambada_multilingual_stablelm`. | German, English, Spanish, French, Italian |
+ | [lambada_multilingual_stablelm](lambada_multilingual_stablelm/README.md) | Multilingual LAMBADA dataset. Users should prefer evaluating on this version of the multilingual dataset instead of on `lambada_multilingual`. | German, English, Spanish, French, Italian, Dutch, Portuguese |
+ | [logiqa](logiqa/README.md) | Logical reasoning tasks requiring advanced inference and deduction. | English, Chinese |
+ | [logiqa2](logiqa2/README.md) | Large-scale logical reasoning dataset adapted from the Chinese Civil Service Examination. | English, Chinese |
+ | [mathqa](mathqa/README.md) | Question answering tasks involving mathematical reasoning and problem-solving. | English |
+ | [mc_taco](mc_taco/README.md) | Question-answer pairs that require temporal commonsense comprehension. | English |
+ | medmcqa | Medical multiple choice questions assessing detailed medical knowledge. | English |
+ | medqa | Multiple choice question answering based on the United States Medical License Exams. | |
+ | [mgsm](mgsm/README.md) | Benchmark of multilingual grade-school math problems. | Spanish, French, German, Russian, Chinese, Japanese, Thai, Swahili, Bengali, Telugu |
+ | [minerva_math](minerva_math/README.md) | Mathematics-focused tasks requiring numerical reasoning and problem-solving skills. | English |
+ | mmlu | Massive Multitask Language Understanding benchmark for broad domain language evaluation. Several variants are supported. | English |
+ | model_written_evals | Evaluation tasks auto-generated for evaluating a collection of AI Safety concerns. | |
+ | [mutual](mutual/README.md) | A retrieval-based dataset for multi-turn dialogue reasoning. | English |
+ | [nq_open](nq_open/README.md) | Open domain question answering tasks based on the Natural Questions dataset. | English |
+ | [okapi/arc_multilingual](okapi/arc_multilingual/README.md) | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (31 languages) **Machine Translated.** |
+ | [okapi/hellaswag_multilingual](okapi/hellaswag_multilingual/README.md) | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (30 languages) |
+ | okapi/mmlu_multilingual | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (34 languages) |
+ | [okapi/truthfulqa_multilingual](okapi/truthfulqa_multilingual/README.md) | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (31 languages) |
+ | [openbookqa](openbookqa/README.md) | Open-book question answering tasks that require external knowledge and reasoning. | English |
+ | [paws-x](paws-x/README.md) | Paraphrase Adversaries from Word Scrambling, focusing on cross-lingual capabilities. | English, French, Spanish, German, Chinese, Japanese, Korean |
+ | [pile](pile/README.md) | Open source language modelling data set that consists of 22 smaller, high-quality datasets. | English |
+ | [pile_10k](pile_10k/README.md) | The first 10K elements of The Pile, useful for debugging models trained on it. | English |
+ | [piqa](piqa/README.md) | Physical Interaction Question Answering tasks to test physical commonsense reasoning. | English |
+ | [polemo2](polemo2/README.md) | Sentiment analysis and emotion detection tasks based on Polish language data. | Polish |
+ | [prost](prost/README.md) | Tasks requiring understanding of professional standards and ethics in various domains. | English |
+ | [pubmedqa](pubmedqa/README.md) | Question answering tasks based on PubMed research articles for biomedical understanding. | English |
+ | [qa4mre](qa4mre/README.md) | Question Answering for Machine Reading Evaluation, assessing comprehension and reasoning. | English |
+ | [qasper](qasper/README.md) | Question Answering dataset based on academic papers, testing in-depth scientific knowledge. | English |
+ | [race](race/README.md) | Reading comprehension assessment tasks based on English exams in China. | English |
+ | realtoxicityprompts | Tasks to evaluate language models for generating text with potential toxicity. | |
+ | [sciq](sciq/README.md) | Science Question Answering tasks to assess understanding of scientific concepts. | English |
+ | [scrolls](scrolls/README.md) | Tasks that involve long-form reading comprehension across various domains. | English |
+ | [siqa](siqa/README.md) | Social Interaction Question Answering to evaluate common sense and social reasoning. | English |
+ | [squad_completion](squad_completion/README.md) | A variant of the SQuAD question answering task designed for zero-shot evaluation of small LMs. | English |
+ | [squadv2](squadv2/README.md) | Stanford Question Answering Dataset version 2, a reading comprehension benchmark. | English |
+ | [storycloze](storycloze/README.md) | Tasks to predict story endings, focusing on narrative logic and coherence. | English |
+ | [super_glue](super_glue/README.md) | A suite of challenging tasks designed to test a range of language understanding skills. | English |
+ | [swag](swag/README.md) | Situations With Adversarial Generations, predicting the next event in videos. | English |
+ | [swde](swde/README.md) | Information extraction tasks from semi-structured web pages. | English |
+ | [tinyBenchmarks](tinyBenchmarks/README.md) | Evaluation of large language models with fewer examples using tiny versions of popular benchmarks. | English |
+ | [tmmluplus](tmmluplus/README.md) | An extended set of tasks under the TMMLU framework for broader academic assessments. | Traditional Chinese |
+ | [toxigen](toxigen/README.md) | Tasks designed to evaluate language models on their propensity to generate toxic content. | English |
+ | [translation](translation/README.md) | Tasks focused on evaluating the language translation capabilities of models. | Arabic, English, Spanish, Basque, Hindi, Indonesian, Burmese, Russian, Swahili, Telugu, Chinese |
+ | [triviaqa](triviaqa/README.md) | A large-scale dataset for trivia question answering to test general knowledge. | English |
+ | [truthfulqa](truthfulqa/README.md) | A QA task aimed at evaluating the truthfulness and factual accuracy of model responses. | English |
+ | [unitxt](unitxt/README.md) | A number of tasks implemented using the unitxt library for flexible, shareable, and reusable data preparation and evaluation for generative AI. | English |
+ | [unscramble](unscramble/README.md) | Tasks involving the rearrangement of scrambled sentences to test syntactic understanding. | English |
+ | [webqs](webqs/README.md) | Web-based question answering tasks designed to evaluate internet search and retrieval. | English |
+ | [wikitext](wikitext/README.md) | Tasks based on text from Wikipedia articles to assess language modeling and generation. | English |
+ | [winogrande](winogrande/README.md) | A large-scale dataset for coreference resolution, inspired by the Winograd Schema Challenge. | English |
+ | [wmdp](wmdp/README.md) | A benchmark with the objective of minimizing performance, based on potentially-sensitive multiple-choice knowledge questions. | English |
+ | [wmt2016](wmt2016/README.md) | Tasks from the WMT 2016 shared task, focusing on translation between multiple languages. | English, Czech, German, Finnish, Russian, Romanian, Turkish |
+ | [wsc273](wsc273/README.md) | The Winograd Schema Challenge, a test of commonsense reasoning and coreference resolution. | English |
+ | [xcopa](xcopa/README.md) | Cross-lingual Choice of Plausible Alternatives, testing reasoning in multiple languages. | Estonian, Haitian, Indonesian, Italian, Quechua, Swahili, Tamil, Thai, Turkish, Vietnamese, Chinese |
+ | [xnli](xnli/README.md) | Cross-Lingual Natural Language Inference to test understanding across different languages. | Arabic, Bulgarian, German, Greek, English, Spanish, French, Hindi, Russian, Swahili, Thai, Turkish, Urdu, Vietnamese, Chinese |
+ | [xnli_eu](xnli_eu/README.md) | Cross-lingual Natural Language Inference tasks in Basque. | Basque |
+ | [xstorycloze](xstorycloze/README.md) | Cross-lingual narrative understanding tasks to predict story endings in multiple languages. | Russian, Simplified Chinese, Spanish, Arabic, Hindi, Indonesian, Telugu, Swahili, Basque, Burmese |
+ | [xwinograd](xwinograd/README.md) | Cross-lingual Winograd schema tasks for coreference resolution in multiple languages. | English, French, Japanese, Portuguese, Russian, Chinese |
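The README above documents the task registry that the YAML configs in this commit plug into. As a quick sanity check that newly added configs register correctly, one might call the harness from Python; this is a minimal sketch that assumes the fork keeps the upstream lm-evaluation-harness `simple_evaluate` entry point and its `hf` model backend, with the model and task names chosen purely as examples.

```python
# Minimal smoke test; assumes this fork keeps the upstream lm-evaluation-harness
# Python API (lm_eval.simple_evaluate) and its "hf" model backend. Model and task
# names are examples only.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["ammlu_nutrition", "m_mmlu_de"],  # tasks whose YAMLs are added in this commit
    num_fewshot=0,
    limit=10,  # only a handful of documents per task, just to verify the configs load
)
print(results["results"])
```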
rag-evaluation-harness/lm_eval/tasks/ammlu/_generate_configs.py ADDED
@@ -0,0 +1,120 @@
+ """
+ Take in a YAML, and output all other splits with this YAML
+ """
+
+ import argparse
+ import os
+
+ import yaml
+ from tqdm import tqdm
+
+
+ SUBJECTS = {
+     "abstract_algebra": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "anatomy": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "astronomy": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "business_ethics": "علوم أخرى",
+     "clinical_knowledge": "علوم أخرى",
+     "college_biology": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "college_chemistry": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "college_computer_science": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "college_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "college_medicine": "علوم أخرى",
+     "college_physics": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "computer_security": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "conceptual_physics": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "econometrics": "العلوم الإجتماعية",
+     "electrical_engineering": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "elementary_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "formal_logic": "العلوم الانسانية",
+     "global_facts": "علوم أخرى",
+     "high_school_biology": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "high_school_chemistry": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "high_school_computer_science": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "high_school_european_history": "العلوم الانسانية",
+     "high_school_geography": "العلوم الإجتماعية",
+     "high_school_government_and_politics": "العلوم الإجتماعية",
+     "high_school_macroeconomics": "العلوم الإجتماعية",
+     "high_school_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "high_school_microeconomics": "العلوم الإجتماعية",
+     "high_school_physics": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "high_school_psychology": "العلوم الإجتماعية",
+     "high_school_statistics": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "high_school_us_history": "العلوم الانسانية",
+     "high_school_world_history": "العلوم الانسانية",
+     "human_aging": "علوم أخرى",
+     "human_sexuality": "العلوم الإجتماعية",
+     "international_law": "العلوم الانسانية",
+     "jurisprudence": "العلوم الانسانية",
+     "logical_fallacies": "العلوم الانسانية",
+     "machine_learning": "ألعلوم وتقنية المعلومات و الرياضيات",
+     "management": "علوم أخرى",
+     "marketing": "علوم أخرى",
+     "medical_genetics": "علوم أخرى",
+     "miscellaneous": "علوم أخرى",
+     "moral_disputes": "العلوم الانسانية",
+     "moral_scenarios": "العلوم الانسانية",
+     "nutrition": "علوم أخرى",
+     "philosophy": "العلوم الانسانية",
+     "prehistory": "العلوم الانسانية",
+     "professional_accounting": "علوم أخرى",
+     "professional_law": "العلوم الانسانية",
+     "professional_medicine": "علوم أخرى",
+     "professional_psychology": "العلوم الإجتماعية",
+     "public_relations": "العلوم الإجتماعية",
+     "security_studies": "العلوم الإجتماعية",
+     "sociology": "العلوم الإجتماعية",
+     "us_foreign_policy": "العلوم الإجتماعية",
+     "virology": "علوم أخرى",
+     "world_religions": "العلوم الانسانية",
+ }
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="ammlu")
+     parser.add_argument("--cot_prompt_path", default=None)
+     parser.add_argument("--task_prefix", default="")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+
+     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path, encoding="utf-8") as f:
+         base_yaml = yaml.full_load(f)
+
+     if args.cot_prompt_path is not None:
+         import json
+
+         with open(args.cot_prompt_path, encoding="utf-8") as f:
+             cot_file = json.load(f)
+
+     for subject_eng, category in tqdm(SUBJECTS.items()):
+         if args.cot_prompt_path is not None:
+             description = cot_file[subject_eng]
+         else:
+             description = f"فم بعملية التقييم في مجال {category} \n\n"
+
+         yaml_dict = {
+             "include": base_yaml_name,
+             "task": f"ammlu_{args.task_prefix}_{subject_eng}"
+             if args.task_prefix != ""
+             else f"ammlu_{subject_eng}",
+             "dataset_name": subject_eng,
+             "description": description,
+         }
+
+         file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
+         print(f"Saving yaml for subset {subject_eng} to {file_save_path}")
+         with open(file_save_path, "w", encoding="utf-8") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
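The loop above is what produces the per-subject YAMLs that follow. Here is a minimal sketch of the `yaml.dump` call for a single subject, assuming the base YAML passed via `--base_yaml_path` is `_default_template_yaml` (as the generated files below suggest); the trailing comments show the expected double-quoted, alphabetically keyed output, which matches `ammlu_electrical_engineering.yaml` below.

```python
# Sketch of what the yaml.dump(...) call above emits for one subject; sort_keys
# defaults to True, hence the alphabetical key order seen in the generated files.
import yaml

yaml_dict = {
    "include": "_default_template_yaml",
    "task": "ammlu_electrical_engineering",
    "dataset_name": "electrical_engineering",
    "description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n",
}
print(yaml.dump(yaml_dict, width=float("inf"), allow_unicode=True, default_style='"'))
# "dataset_name": "electrical_engineering"
# "description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
# "include": "_default_template_yaml"
# "task": "ammlu_electrical_engineering"
```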
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_electrical_engineering.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "electrical_engineering"
+ "description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_electrical_engineering"
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_biology"
+ "description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_high_school_biology"
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_geography"
+ "description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_high_school_geography"
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_microeconomics"
+ "description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_high_school_microeconomics"
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_us_history.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_us_history"
+ "description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_high_school_us_history"
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "jurisprudence"
+ "description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_jurisprudence"
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_machine_learning.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "machine_learning"
+ "description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_machine_learning"
rag-evaluation-harness/lm_eval/tasks/ammlu/ammlu_nutrition.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "nutrition"
+ "description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
+ "include": "_default_template_yaml"
+ "task": "ammlu_nutrition"
rag-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml ADDED
@@ -0,0 +1,29 @@
+ include: "_default_template_yaml"
+ group:
+   - french_bench
+   - french_bench_extra
+ description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques mots du contexte. Si il est impossible de répondre avec les informations du contexte, répond 'Impossible'."
+ task: french_bench_fquadv2
+ dataset_path: manu/fquad2_test
+ output_type: generate_until
+ validation_split: valid
+ doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:"
+ doc_to_target: "{% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}"
+ target_delimiter: " "
+ should_decontaminate: true
+ doc_to_decontamination_query: context
+ generation_kwargs:
+   until:
+     - "\n"
+ # filter_list:
+ #   - name: remove_whitespace
+ #     filter:
+ #       - function: remove_whitespace
+ #       - function: take_first
+ metric_list:
+   - metric: !function utils.exact
+     aggregation: mean
+     higher_is_better: true
+   - metric: !function utils.f1
+     aggregation: mean
+     higher_is_better: true
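The `metric_list` above references `!function utils.exact` and `!function utils.f1`, which resolve to Python callables in the task folder's `utils.py`; that file is not part of this commit. The following is only a hypothetical sketch of what such SQuAD-style scorers typically look like (argument order and normalization details are assumptions, not the fork's actual implementation).

```python
# Hypothetical sketch of the utils.exact / utils.f1 callables referenced by
# `!function` above; the real utils.py is not in this commit, so the argument
# order and normalization details are assumptions (SQuAD-style scoring).
import re
import string


def _normalize(text: str) -> str:
    text = text.lower()
    text = "".join(ch for ch in text if ch not in string.punctuation)
    return re.sub(r"\s+", " ", text).strip()


def exact(references, predictions, **kwargs):
    # 1.0 if the prediction matches the gold answer after normalization, else 0.0
    return float(_normalize(references[0]) == _normalize(predictions[0]))


def f1(references, predictions, **kwargs):
    # token-level F1 between the prediction and the gold answer
    ref_tokens = _normalize(references[0]).split()
    pred_tokens = _normalize(predictions[0]).split()
    common = sum(min(ref_tokens.count(t), pred_tokens.count(t)) for t in set(pred_tokens))
    if not ref_tokens or not pred_tokens or common == 0:
        return float(ref_tokens == pred_tokens)
    precision = common / len(pred_tokens)
    recall = common / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)
```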
rag-evaluation-harness/lm_eval/tasks/french_bench/french_bench_topic_based_nli.yaml ADDED
@@ -0,0 +1,23 @@
+ include: "_default_template_yaml"
+ group:
+   - french_bench
+   - french_bench_extra
+ description: "A propos du thème spécifié, l'avis client est il positif, négatif, ou neutre ?"
+ task: french_bench_topic_based_nli
+ dataset_path: manu/topic_based_nli_test
+ output_type: multiple_choice
+ validation_split: valid
+ # doc_to_text: "\nAvis Client: {{text}}\n\nEn considèrant uniquement le thème \"{{topic}}\", l'avis client est plutot:\nA. Positif \nB. Négatif\nC. Mitigé \nD. Neutre\nE. Absent\n\nRéponse:"
+ # doc_to_choice: ["A", "B", "C", "D", "E"]
+ doc_to_text: "\nAvis Client: {{text}}\n\nA propos du thème \"{{topic}}\", l'avis client est"
+ doc_to_choice: ['positif', 'négatif', 'neutre']
+ doc_to_target: "{{['positif', 'negatif', 'neutre'].index(polarity)}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: texte
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
rag-evaluation-harness/lm_eval/tasks/french_bench/french_bench_vocab.yaml ADDED
@@ -0,0 +1,20 @@
+ include: "_default_template_yaml"
+ group:
+   - french_bench
+   - french_bench_mc
+ # description: "Répond au mieux en complétant la question avec une des réponses proposées."
+ dataset_path: manu/french-bench-grammar-vocab-reading
+ output_type: multiple_choice
+ validation_split: Vocabulary
+ fewshot_split: Vocabulary
+ test_split: Vocabulary
+ # doc_to_text: "Question: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:"
+ # doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_text: "La phrase suivante est logique sémantiquement:\n"
+ doc_to_choice: "{{[question.replace('<...>', answerA), question.replace('<...>', answerB), question.replace('<...>', answerC), question.replace('<...>', answerD)]}}"
+ doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}'
+ task: french_bench_vocab
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
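The `doc_to_choice` template above turns each vocabulary item into four full sentences by substituting the candidate answers into the `<...>` placeholder, and `doc_to_target` selects the index of the gold answer. A small illustration with invented field values (the field names mirror the YAML):

```python
# Illustration of the doc_to_choice / doc_to_target templates above, applied to a
# hypothetical vocabulary item (field names follow the YAML; the values are invented).
doc = {
    "question": "Il a <...> son train de justesse.",
    "answerA": "raté",
    "answerB": "mangé",
    "answerC": "peint",
    "answerD": "plié",
    "answer": "A",
}

choices = [doc["question"].replace("<...>", doc["answer" + letter]) for letter in "ABCD"]
target = ["answerA", "answerB", "answerC", "answerD"].index("answer" + doc["answer"])

print(choices[target])  # -> "Il a raté son train de justesse."
```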
rag-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_main_cot_n_shot
rag-evaluation-harness/lm_eval/tasks/kobest/kobest_sentineg.yaml ADDED
@@ -0,0 +1,25 @@
+ group:
+   - kobest
+ task: kobest_sentineg
+ dataset_path: skt/kobest_v1
+ dataset_name: sentineg
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: !function utils.sentineg_doc_to_text
+ doc_to_target: "{{label}}"
+ doc_to_choice: ["부정", "긍정"]
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: True
+   - metric: f1
+     aggregation: !function utils.macro_f1_score
+     average: macro
+     hf_evaluate: true
+     higher_is_better: True
+ metadata:
+   version: 1.0
+ dataset_kwargs:
+   trust_remote_code: true
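The f1 entry above aggregates per-document results with a custom `utils.macro_f1_score`; that helper is not included in this commit, so the following is only a plausible sketch of such an aggregation using scikit-learn, assuming it receives the list of `(gold, prediction)` pairs collected per document.

```python
# Plausible sketch of utils.macro_f1_score (the actual utils.py is not part of this
# commit). Assumes the aggregation receives (gold, prediction) pairs, one per document.
from sklearn.metrics import f1_score


def macro_f1_score(items):
    golds, preds = zip(*items)
    return f1_score(golds, preds, average="macro")
```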
rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_hi.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _arc_yaml
+ task: arc_hi
+ dataset_path: alexandrainst/m_arc
+ dataset_name: hi
+ training_split: train
+ validation_split: validation
+ test_split: test
rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_hy.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _arc_yaml
+ task: arc_hy
+ dataset_path: alexandrainst/m_arc
+ dataset_name: hy
+ training_split: train
+ validation_split: validation
+ test_split: test
rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_sk.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _arc_yaml
+ task: arc_sk
+ dataset_path: alexandrainst/m_arc
+ dataset_name: sk
+ training_split: train
+ validation_split: validation
+ test_split: test
rag-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_te.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _arc_yaml
+ task: arc_te
+ dataset_path: alexandrainst/m_arc
+ dataset_name: te
+ training_split: train
+ validation_split: validation
+ test_split: test
rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_fr.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_fr
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: fr
+ training_split: null
+ validation_split: val
rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_id
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: id
+ training_split: null
+ validation_split: val
rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_kn
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: kn
+ training_split: null
+ validation_split: val
rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ml
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ml
+ training_split: null
+ validation_split: val
rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_sr
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: sr
+ training_split: null
+ validation_split: val
rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ta
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ta
+ training_split: null
+ validation_split: val
rag-evaluation-harness/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_te.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_te
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: te
+ training_split: null
+ validation_split: val
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/_default_yaml ADDED
@@ -0,0 +1,17 @@
+ group:
+   - m_mmlu
+ dataset_path: alexandrainst/m_mmlu
+ test_split: test
+ fewshot_split: train
+ fewshot_config:
+   sampler: first_n
+ output_type: multiple_choice
+ doc_to_text: "{{instruction.strip()}}\nA. {{option_a}}\nB. {{option_b}}\nC. {{option_c}}\nD. {{option_d}}\nAnswer:"
+ doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_target: answer
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
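The `doc_to_text` field above is a Jinja2 template over the dataset's columns, shared by all the `m_mmlu_*` tasks that follow. Rendering it for a hypothetical record (invented values, field names as in the YAML) shows the prompt format:

```python
# Rendering the doc_to_text template above for a hypothetical m_mmlu record
# (field names follow the YAML; the values are invented). Assumes Jinja2 templating,
# as the harness uses for these fields.
from jinja2 import Template

doc_to_text = (
    "{{instruction.strip()}}\n"
    "A. {{option_a}}\nB. {{option_b}}\nC. {{option_c}}\nD. {{option_d}}\nAnswer:"
)
doc = {
    "instruction": "Welche Einheit misst den elektrischen Widerstand? ",
    "option_a": "Volt",
    "option_b": "Ohm",
    "option_c": "Ampere",
    "option_d": "Watt",
}
print(Template(doc_to_text).render(**doc))
# Welche Einheit misst den elektrischen Widerstand?
# A. Volt
# B. Ohm
# C. Ampere
# D. Watt
# Answer:
```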
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_bn.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: bn
+ include: _default_yaml
+ task: m_mmlu_bn
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_de.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: de
+ include: _default_yaml
+ task: m_mmlu_de
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_eu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: eu
+ include: _default_yaml
+ task: m_mmlu_eu
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ml.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: ml
+ include: _default_yaml
+ task: m_mmlu_ml
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ro.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: ro
+ include: _default_yaml
+ task: m_mmlu_ro
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sk.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: sk
+ include: _default_yaml
+ task: m_mmlu_sk
rag-evaluation-harness/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_uk.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: uk
+ include: _default_yaml
+ task: m_mmlu_uk
rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/README.md ADDED
@@ -0,0 +1,47 @@
+ # Multilingual TruthfulQA
+
+ ### Paper
+
+ Title: `Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback`
+
+ Abstract: https://arxiv.org/abs/2307.16039
+
+ A key technology for the development of large language models (LLMs) involves instruction tuning that helps align the models' responses with human expectations to realize impressive learning abilities. Two major approaches for instruction tuning characterize supervised fine-tuning (SFT) and reinforcement learning from human feedback (RLHF), which are currently applied to produce the best commercial LLMs (e.g., ChatGPT). To improve the accessibility of LLMs for research and development efforts, various instruction-tuned open-source LLMs have also been introduced recently, e.g., Alpaca, Vicuna, to name a few. However, existing open-source LLMs have only been instruction-tuned for English and a few popular languages, thus hindering their impacts and accessibility to many other languages in the world. Among a few very recent work to explore instruction tuning for LLMs in multiple languages, SFT has been used as the only approach to instruction-tune LLMs for multiple languages. This has left a significant gap for fine-tuned LLMs based on RLHF in diverse languages and raised important questions on how RLHF can boost the performance of multilingual instruction tuning. To overcome this issue, we present Okapi, the first system with instruction-tuned LLMs based on RLHF for multiple languages. Okapi introduces instruction and response-ranked data in 26 diverse languages to facilitate the experiments and development of future multilingual LLM research. We also present benchmark datasets to enable the evaluation of generative LLMs in multiple languages. Our experiments demonstrate the advantages of RLHF for multilingual instruction over SFT for different base models and datasets. Our framework and resources are released at this https URL.
+
+ Homepage: `https://github.com/nlp-uoregon/Okapi`
+
+
+ ### Citation
+
+ ```
+ @article{dac2023okapi,
+   title={Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback},
+   author={Dac Lai, Viet and Van Nguyen, Chien and Ngo, Nghia Trung and Nguyen, Thuat and Dernoncourt, Franck and Rossi, Ryan A and Nguyen, Thien Huu},
+   journal={arXiv e-prints},
+   pages={arXiv--2307},
+   year={2023}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - truthfulqa_multilingual
+
+ #### Tasks
+
+ - `truthfulqa_{ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh}`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+   * [x] Have you referenced the original paper that introduced the task?
+   * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_bn_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: bn
+ training_split: null
+ validation_split: val
+ test_split: null
rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_es_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: es
+ training_split: null
+ validation_split: val
+ test_split: null
rag-evaluation-harness/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_fr_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: fr
+ training_split: null
+ validation_split: val
+ test_split: null